
Commit 5b79fbc

This change updates the articulation and actuator classes so that the new actuator drive model, including velocity- and effort-dependent constraints on motor actuation, can be configured. For details, see https://nvidia-omniverse.github.io/PhysX/physx/5.6.1/docs/Articulations.html#articulation-drive-stability and https://docs.omniverse.nvidia.com/kit/docs/omni_physics/latest/extensions/runtime/source/omni.physics.tensors/docs/api/python.html#omni.physics.tensors.impl.api.ArticulationView.set_dof_drive_model_properties
1 parent d6004fd commit 5b79fbc

File tree

12 files changed: +403, -346 lines


docs/source/api/lab/isaaclab.actuators.rst

Lines changed: 3 additions & 0 deletions
@@ -36,6 +36,9 @@ Actuator Base
    :inherited-members:
    :exclude-members: __init__, class_type

+.. autoclass:: isaaclab.actuators.actuator_base_cfg.ActuatorBaseCfg.DriveModelCfg
+    :members:
+
 Implicit Actuator
 -----------------

scripts/get_omni_version.py

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import omni.kit.app
+
+from isaaclab.app import AppLauncher
+
+app_launcher = AppLauncher(headless=True, enable_cameras=True)
+simulation_app = app_launcher.app
+
+app = omni.kit.app.get_app()
+kit_version = app.get_kit_version()
+print(kit_version)
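
Presumably this helper is run inside the Isaac Lab Python environment (for example via the launcher script, ./isaaclab.sh -p scripts/get_omni_version.py, assuming the standard workflow): it boots a headless app through AppLauncher and prints the Omniverse Kit version string, which is useful for checking whether the installed Isaac Sim build is new enough for the drive model feature below.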

source/isaaclab/config/extension.toml

Lines changed: 2 additions & 1 deletion
@@ -1,7 +1,8 @@
 [package]

 # Note: Semantic Versioning is used: https://semver.org/
-version = "0.48.1"
+
+version = "0.48.2"

 # Description
 title = "Isaac Lab framework for Robot Learning"

source/isaaclab/docs/CHANGELOG.rst

Lines changed: 21 additions & 0 deletions
@@ -1,6 +1,17 @@
 Changelog
 ---------

+0.48.2 (2025-11-10)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Implemented drive model improvements for implicit actuators, allowing them to configure a new PhysX feature that applies
+  constraints on actuator effort dependent on the torque and velocity of the articulation.
+* Introduced NamedTuple config classes as a way to organize related parameters, and extended the configuration parsing to
+  work with related (mutually dependent) parameters in the configurations.
+
 0.48.1 (2025-11-10)
 ~~~~~~~~~~~~~~~~~~~

@@ -71,6 +82,16 @@ Added

 * Added parameter :attr:`~isaaclab.terrains.TerrainImporterCfg.use_terrain_origins` to allow generated sub terrains with grid origins.

+0.48.2 (2025-11-10)
+~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Implemented drive model improvements for implicit actuators, allowing them to configure a new PhysX feature that applies
+  constraints on actuator effort dependent on the torque and velocity of the articulation.
+* Introduced NamedTuple config classes as a way to organize related parameters, and extended the configuration parsing to
+  work with related (mutually dependent) parameters in the configurations.

 0.47.7 (2025-10-31)
 ~~~~~~~~~~~~~~~~~~~
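
As a rough illustration of the NamedTuple-based grouping mentioned above: the drive model bundles three mutually dependent values into one DriveModelCfg, and, per the parsing changes in this commit, it can presumably also be given as a dictionary keyed by joint-name expressions. The field names below come from the new DriveModelCfg; the joint-name patterns and numeric values are invented for the example.

from isaaclab.actuators.actuator_base_cfg import ActuatorBaseCfg

# one bundle of related drive-model parameters, shared by all joints in the group
drive_model = ActuatorBaseCfg.DriveModelCfg(
    speed_effort_gradient=0.02,
    max_actuator_velocity=30.0,
    velocity_dependent_resistance=0.05,
)

# or per joint-name pattern (hypothetical joint names), one DriveModelCfg per matching group
drive_model_per_joint = {
    ".*_hip_.*": ActuatorBaseCfg.DriveModelCfg(0.02, 25.0, 0.05),
    ".*_knee": ActuatorBaseCfg.DriveModelCfg(0.03, 20.0, 0.08),
}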

source/isaaclab/isaaclab/actuators/actuator_base.py

Lines changed: 113 additions & 21 deletions
@@ -8,13 +8,12 @@
 import torch
 from abc import ABC, abstractmethod
 from collections.abc import Sequence
-from typing import TYPE_CHECKING, ClassVar
+from typing import ClassVar

 import isaaclab.utils.string as string_utils
 from isaaclab.utils.types import ArticulationActions

-if TYPE_CHECKING:
-    from .actuator_base_cfg import ActuatorBaseCfg
+from .actuator_base_cfg import ActuatorBaseCfg


 class ActuatorBase(ABC):
@@ -84,6 +83,18 @@ class ActuatorBase(ABC):
     For implicit actuators, the :attr:`velocity_limit` and :attr:`velocity_limit_sim` are the same.
     """

+    drive_model: torch.Tensor
+    """Three parameters for each joint/env defining:
+    (1) [:, :, 0] speed_effort_gradient : float = 0.0 (default),
+    (2) [:, :, 1] max_actuator_velocity : float = torch.inf (default), and
+    (3) [:, :, 2] velocity_dependent_resistance : float = 0.0 (default)
+
+    which define velocity- and effort-dependent constraints on the motor's performance.
+
+    This feature is only available in Isaac Sim 5.0 and later.
+
+    The shape is (num_envs, num_joints, 3)."""
+
     stiffness: torch.Tensor
     """The stiffness (P gain) of the PD controller. Shape is (num_envs, num_joints)."""
@@ -124,6 +135,7 @@ def __init__(
         viscous_friction: torch.Tensor | float = 0.0,
         effort_limit: torch.Tensor | float = torch.inf,
         velocity_limit: torch.Tensor | float = torch.inf,
+        drive_model: torch.Tensor | tuple[float, float, float] = ActuatorBaseCfg.DriveModelCfg(),
     ):
         """Initialize the actuator.
@@ -160,6 +172,9 @@
                 If a tensor, then the shape is (num_envs, num_joints).
             velocity_limit: The default velocity limit. Defaults to infinity.
                 If a tensor, then the shape is (num_envs, num_joints).
+            drive_model: Drive model for the actuator, including speed_effort_gradient, max_actuator_velocity, and
+                velocity_dependent_resistance in that order. Defaults to (0.0, torch.inf, 0.0).
+                If a tensor, then the shape is (num_envs, num_joints, 3).
         """
         # save parameters
         self.cfg = cfg
@@ -187,19 +202,32 @@ def __init__(
             ("friction", friction),
             ("dynamic_friction", dynamic_friction),
             ("viscous_friction", viscous_friction),
+            ("drive_model", drive_model, 3),
         ]
-        for param_name, usd_val in to_check:
+        for param_name, usd_val, *tuple_len in to_check:
+            # check if the parameter requires a tuple or a single float
+            if len(tuple_len) > 0:
+                shape = (self._num_envs, self.num_joints, tuple_len[0])
+            else:
+                shape = (self._num_envs, self.num_joints)
+
             cfg_val = getattr(self.cfg, param_name)
-            setattr(self, param_name, self._parse_joint_parameter(cfg_val, usd_val))
+            setattr(self, param_name, self._parse_joint_parameter(cfg_val, usd_val, shape, param_name=param_name))
             new_val = getattr(self, param_name)

             allclose = (
-                torch.all(new_val == usd_val) if isinstance(usd_val, (float, int)) else torch.allclose(new_val, usd_val)
+                torch.all(new_val == usd_val)
+                if isinstance(usd_val, (float, int))
+                else (
+                    all([torch.all(new_val[:, :, i] == float(v)) for i, v in enumerate(usd_val)])
+                    if isinstance(usd_val, tuple)
+                    else torch.allclose(new_val, usd_val)
+                )
             )
             if cfg_val is None or not allclose:
                 self._record_actuator_resolution(
                     cfg_val=getattr(self.cfg, param_name),
-                    new_val=new_val[0],  # new val always has the shape of (num_envs, num_joints)
+                    new_val=new_val[0],
                     usd_val=usd_val,
                     joint_names=joint_names,
                     joint_ids=joint_ids,
@@ -303,20 +331,35 @@ def _record_actuator_resolution(self, cfg_val, new_val, usd_val, joint_names, joint_ids):

         ids = joint_ids if isinstance(joint_ids, torch.Tensor) else list(range(len(joint_names)))
         for idx, name in enumerate(joint_names):
-            cfg_val_log = "Not Specified" if cfg_val is None else float(new_val[idx])
-            default_usd_val = usd_val if isinstance(usd_val, (float, int)) else float(usd_val[0][idx])
-            applied_val_log = default_usd_val if cfg_val is None else float(new_val[idx])
-            table.append([name, int(ids[idx]), default_usd_val, cfg_val_log, applied_val_log])
+            if len(new_val.shape) == 1:
+                cfg_val_log = "Not Specified" if cfg_val is None else float(new_val[idx])
+                default_usd_val = usd_val if isinstance(usd_val, (float, int)) else float(usd_val[0][idx])
+                applied_val_log = default_usd_val if cfg_val is None else float(new_val[idx])
+                table.append([name, int(ids[idx]), default_usd_val, cfg_val_log, applied_val_log])
+            else:
+                cfg_val_log = "Not Specified" if cfg_val is None else tuple(new_val[idx])
+                default_usd_val = usd_val if isinstance(usd_val, (tuple)) else tuple(usd_val[0][idx][:])
+                applied_val_log = default_usd_val if cfg_val is None else tuple(new_val[idx])
+                table.append([name, int(ids[idx]), default_usd_val, cfg_val_log, applied_val_log])

     def _parse_joint_parameter(
-        self, cfg_value: float | dict[str, float] | None, default_value: float | torch.Tensor | None
+        self,
+        cfg_value: tuple[float, ...] | dict[str, tuple[float, ...]] | float | dict[str, float] | None,
+        default_value: tuple[float, ...] | float | torch.Tensor | None,
+        expected_shape: tuple[int, ...] | None = None,
+        *,
+        param_name: str = "No name specified",
     ) -> torch.Tensor:
         """Parse the joint parameter from the configuration.

         Args:
             cfg_value: The parameter value from the configuration. If None, then use the default value.
             default_value: The default value to use if the parameter is None. If it is also None,
                 then an error is raised.
+            expected_shape: The expected shape for the tensor buffer. Usually defaults to (num_envs, num_joints).
+
+        Kwargs:
+            param_name: A string with the parameter name. (Optional; used only in exception messages.)

         Returns:
             The parsed parameter value.
@@ -325,38 +368,87 @@ def _parse_joint_parameter(
             TypeError: If the parameter value is not of the expected type.
             TypeError: If the default value is not of the expected type.
             ValueError: If the parameter value is None and no default value is provided.
-            ValueError: If the default value tensor is the wrong shape.
+            ValueError: If a tensor or tuple is the wrong shape.
         """
+        if expected_shape is None:
+            expected_shape = (self._num_envs, self.num_joints)
         # create parameter buffer
-        param = torch.zeros(self._num_envs, self.num_joints, device=self._device)
+        param = torch.zeros(*expected_shape, device=self._device)
+
         # parse the parameter
         if cfg_value is not None:
             if isinstance(cfg_value, (float, int)):
                 # if float, then use the same value for all joints
                 param[:] = float(cfg_value)
+            elif isinstance(cfg_value, tuple):
+                # if tuple, ensure we expect a tuple for this parameter
+                if len(expected_shape) < 3:
+                    raise TypeError(
+                        f"Invalid type for parameter value: {type(cfg_value)} for parameter {param_name}"
+                        + f" actuator on joints {self.joint_names}. Expected float or dict, got tuple"
+                    )
+                # ensure the tuple is the correct length, and assign to the last tensor dimensions across all joints
+                if not len(cfg_value) == expected_shape[2]:
+                    raise ValueError(
+                        f"Invalid tuple length for parameter {param_name}, got {len(cfg_value)}, expected"
+                        + f" {expected_shape[2]}"
+                    )
+                for i, v in enumerate(cfg_value):
+                    param[:, :, i] = float(v)
             elif isinstance(cfg_value, dict):
                 # if dict, then parse the regular expression
-                indices, _, values = string_utils.resolve_matching_names_values(cfg_value, self.joint_names)
-                # note: need to specify type to be safe (e.g. values are ints, but we want floats)
-                param[:, indices] = torch.tensor(values, dtype=torch.float, device=self._device)
+                indices, j, values = string_utils.resolve_matching_names_values(cfg_value, self.joint_names)
+                # if the expected shape has two dimensions, we expect floats
+                if len(expected_shape) < 3:
+                    # note: need to specify type to be safe (e.g. values are ints, but we want floats)
+                    param[:, indices] = torch.tensor(values, dtype=torch.float, device=self._device)
+                # otherwise, we expect tuples
+                else:
+                    # We can't directly assign tuples to tensors, so iterate through them
+                    for i, v in enumerate(values):
+                        # Raise an exception if the tuple is the incorrect length
+                        if len(v) != expected_shape[2]:
+                            raise ValueError(
+                                f"Invalid tuple length for parameter {param_name} on joint {j[i]} at index"
+                                f" {indices[i]}, "
+                                + f"expected {expected_shape[2]} got {len(v)}."
+                            )
+                        # Otherwise iterate through the tuple, and assign the values in order.
+                        for i2, v2 in enumerate(v):
+                            param[:, indices[i], i2] = float(v2)
             else:
                 raise TypeError(
                     f"Invalid type for parameter value: {type(cfg_value)} for "
-                    + f"actuator on joints {self.joint_names}. Expected float or dict."
+                    + f"actuator on joints {self.joint_names}. Expected tuple, float or dict."
                 )
         elif default_value is not None:
             if isinstance(default_value, (float, int)):
                 # if float, then use the same value for all joints
                 param[:] = float(default_value)
+            elif isinstance(default_value, tuple):
+                # if tuple, ensure we expect a tuple for this parameter
+                if len(expected_shape) < 3:
+                    raise TypeError(
+                        f"Invalid default type for parameter value: {type(default_value)} for "
+                        + f"actuator on joints {self.joint_names}. Expected float or dict, got tuple"
+                    )
+                # ensure the tuple is the correct length, and assign to the last tensor dimensions across all joints
+                if not len(default_value) == expected_shape[2]:
+                    raise ValueError(
+                        f"Invalid tuple length for parameter {param_name}, got {len(default_value)}, expected"
+                        + f" {expected_shape[2]}"
+                    )
+                for i, v in enumerate(default_value):
+                    param[:, :, i] = float(v)
             elif isinstance(default_value, torch.Tensor):
                 # if tensor, then use the same tensor for all joints
-                if default_value.shape == (self._num_envs, self.num_joints):
+                if tuple(default_value.shape) == expected_shape:
                     param = default_value.float()
                 else:
                     raise ValueError(
                         "Invalid default value tensor shape.\n"
-                        f"Got: {default_value.shape}\n"
-                        f"Expected: {(self._num_envs, self.num_joints)}"
+                        + f"Got: {tuple(default_value.shape)}\n"
+                        + f"Expected: {expected_shape}"
                     )
             else:
                 raise TypeError(
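
A minimal standalone sketch of the tuple-handling path added to _parse_joint_parameter above, using only torch and made-up sizes: a 3-tuple such as the drive model default is broadcast into the trailing dimension of a (num_envs, num_joints, 3) buffer, so every environment and joint receives the same three values.

import torch

num_envs, num_joints = 4, 2
expected_shape = (num_envs, num_joints, 3)
# (speed_effort_gradient, max_actuator_velocity, velocity_dependent_resistance)
drive_model = (0.02, 30.0, 0.05)

param = torch.zeros(*expected_shape)
if len(drive_model) != expected_shape[2]:
    raise ValueError(f"Invalid tuple length: got {len(drive_model)}, expected {expected_shape[2]}")
for i, value in enumerate(drive_model):
    # broadcast the i-th tuple entry across all envs and joints
    param[:, :, i] = float(value)

print(param.shape)  # torch.Size([4, 2, 3])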

source/isaaclab/isaaclab/actuators/actuator_base_cfg.py

Lines changed: 14 additions & 0 deletions
@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD-3-Clause

 from dataclasses import MISSING
+from torch import inf
+from typing import NamedTuple

 from isaaclab.utils import configclass

@@ -48,6 +50,18 @@ class ActuatorBaseCfg:

     """

+    """Optional settings (min Isaac Sim 5.0) for the drive model, capturing the velocity-effort dependence of the performance envelope.
+
+    See: https://docs.omniverse.nvidia.com/kit/docs/omni_physics/107.3/_downloads/f44e831b7f29e7c2ec8e3f2c54418430/drivePerformanceEnvelope.pdf
+    """
+
+    class DriveModelCfg(NamedTuple):
+        speed_effort_gradient: float = 0.0
+        max_actuator_velocity: float = inf
+        velocity_dependent_resistance: float = 0.0
+
+    drive_model: dict[str, DriveModelCfg] | DriveModelCfg | None = None
+
     velocity_limit: dict[str, float] | float | None = None
     """Velocity limit of the joints in the group. Defaults to None.
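
Putting the pieces together, an actuator group could opt into the new drive model roughly as sketched below. This assumes the existing ImplicitActuatorCfg interface (joint_names_expr, stiffness, damping, effort_limit) from isaaclab.actuators; the numeric values are illustrative only and not tuned for any robot.

from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab.actuators.actuator_base_cfg import ActuatorBaseCfg

leg_actuators = ImplicitActuatorCfg(
    joint_names_expr=[".*_hip_.*", ".*_knee"],
    stiffness=80.0,
    damping=4.0,
    effort_limit=100.0,
    # new in 0.48.2: velocity- and effort-dependent drive constraints (Isaac Sim 5.0+)
    drive_model=ActuatorBaseCfg.DriveModelCfg(
        speed_effort_gradient=0.02,
        max_actuator_velocity=30.0,
        velocity_dependent_resistance=0.05,
    ),
)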
