curobo.types module

Common data types module.

This module provides common data types used throughout CuRobo for representing robot states, poses, camera observations, and tensor device configurations.

Example

```python
from curobo.types import JointState, Pose, CameraObservation, DeviceCfg

# Create joint state
joint_state = JointState.from_position([0.0, -0.5, 0.0, -2.0, 0.0, 1.5, 0.0])

# Create pose
pose = Pose(
    position=[0.5, 0.0, 0.5],
    quaternion=[1, 0, 0, 0],  # w, x, y, z
)

# Create camera observation
camera_obs = CameraObservation(
    depth_image=depth_tensor,
    intrinsics=camera_intrinsics,
    pose=camera_pose,
)

# Specify tensor device and dtype
device_cfg = DeviceCfg(device="cuda:0", dtype=torch.float32)
```

class JointState(
position,
velocity=None,
acceleration=None,
joint_names=None,
jerk=None,
device_cfg=DeviceCfg(device=device(type='cuda',
index=0),
dtype=torch.float32,
collision_geometry_dtype=torch.float32,
collision_gradient_dtype=torch.float32,
collision_distance_dtype=torch.float32),
dt=None,
aux_data=<factory>,
knot=None,
knot_dt=None,
control_space=None,
)

Bases: curobo._src.state.state_base.State

Joint-space robot state (position and optional derivatives).

Convention: use joint_state or js for JointState objects; use q for raw position tensors.

Parameters:
position: List[float] | torch.Tensor
velocity: List[float] | torch.Tensor | None = None
acceleration: List[float] | torch.Tensor | None = None
joint_names: List[str] | None = None
jerk: List[float] | torch.Tensor | None = None
device_cfg: curobo._src.types.device_cfg.DeviceCfg = DeviceCfg(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32)
dt: torch.Tensor | None = None
aux_data: dict
knot: torch.Tensor | None = None
knot_dt: torch.Tensor | None = None
control_space: curobo._src.types.control_space.ControlSpace | None = None
data_ptr()
property device: torch.device
property dtype: torch.dtype
property shape: torch.Size
property ndim: int
static from_numpy(
joint_names,
position,
velocity=None,
acceleration=None,
jerk=None,
device_cfg=DeviceCfg(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32),
)
Parameters:
static from_position(
position,
joint_names=None,
)
Parameters:
static from_state_tensor(
state_tensor,
joint_names=None,
dof=7,
)
static from_list(
position,
velocity,
acceleration,
device_cfg,
)
Parameters:

device_cfg (DeviceCfg(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32))

static zeros(
size,
device_cfg,
joint_names=None,
)
Parameters:
to(device_cfg)
Parameters:

device_cfg (curobo._src.types.device_cfg.DeviceCfg)

clone()
detach()
copy_reference(
in_joint_state,
)

Copy reference to in_joint_state

Parameters:

in_joint_state (curobo._src.state.state_joint.JointState)

copy_(
in_joint_state,
allow_clone=True,
)
Parameters:
copy_data(in_joint_state)

Copy data from in_joint_state to self

Parameters:

in_joint_state (curobo._src.state.state_joint.JointState)

unsqueeze(idx)
Parameters:

idx (int)

squeeze(dim=0)
Parameters:

dim (int | None)

view(*shape)
blend(coeff, new_state)
Parameters:
get_state_tensor()
stack(new_state)
Parameters:

new_state (curobo._src.state.state_joint.JointState)

cat(other_js, dim)
Parameters:
repeat(repeat_input)
Parameters:

repeat_input (List[int])

repeat_seeds(num_seeds)
Parameters:

num_seeds (int)

apply_kernel(kernel_mat)
scale(dt)
Parameters:

dt (float | torch.Tensor)

scale_by_dt(dt, new_dt)
Parameters:
scale_time(new_dt)
Parameters:

new_dt (torch.Tensor)

calculate_fd_from_position(
dt=None,
)
Parameters:

dt (torch.Tensor | None)

reorder(joint_names)

Reorder joint state to match the given joint names order.

Parameters:

joint_names (List[str]) – List of joint names in the desired order.

Return type:

JointState

Returns:

New JointState with joints reordered to match joint_names.

reindex(joint_names)

Reindex joint state in-place to match the given joint names order.

Parameters:

joint_names (List[str]) – List of joint names in the desired order.

get_augmented_joint_state(
joint_names,
lock_joints=None,
)
Return type:

JointState

Parameters:

lock_joints (curobo._src.state.state_joint.JointState | None)

append_joints(joint_state)
Parameters:

joint_state (curobo._src.state.state_joint.JointState)

gather_by_seed_index(idx)
Parameters:

idx (torch.Tensor)

copy_only_index(
in_joint_state,
idx,
)
Parameters:
copy_at_index(
in_joint_state,
idx,
)
Parameters:
copy_at_batch_seed_indices(
in_joint_state,
batch_idx,
seed_idx,
)
Parameters:
get_trajectory_at_horizon_index(
horizon_index,
)
Parameters:

horizon_index (int)

trim_trajectory(
start_idx,
end_idx=None,
)
Parameters:
  • start_idx (int)

  • end_idx (int | None)

index_dof(idx)
Parameters:

idx (int)

__init__(
position,
velocity=None,
acceleration=None,
joint_names=None,
jerk=None,
device_cfg=DeviceCfg(device=device(type='cuda',
index=0),
dtype=torch.float32,
collision_geometry_dtype=torch.float32,
collision_gradient_dtype=torch.float32,
collision_distance_dtype=torch.float32),
dt=None,
aux_data=<factory>,
knot=None,
knot_dt=None,
control_space=None,
)
Parameters:
Return type:

None

class RobotState(
joint_state,
joint_torque=None,
cuda_robot_model_state=None,
)

Bases: curobo._src.state.state_base.State

Parameters:
joint_state: curobo._src.state.state_joint.JointState
joint_torque: torch.Tensor | None = None
cuda_robot_model_state: curobo._src.robot.kinematics.kinematics_state.KinematicsState | None = None
data_ptr()
detach()
property robot_spheres: torch.Tensor | None
property tool_poses: curobo._src.types.tool_pose.ToolPose | None
property tool_frames: List[str]
copy_at_batch_seed_indices(
other,
batch_idx,
seed_idx,
)

Copy robot state at specific batch and seed indices

Parameters:
copy_only_index(other, index)

Copy robot state at specific indices

Parameters:
Return type:

Pose

Parameters:

link_name (str)

clone()
Return type:

RobotState

copy_(other)
Parameters:

other (curobo._src.state.state_robot.RobotState)

__init__(
joint_state,
joint_torque=None,
cuda_robot_model_state=None,
)
Parameters:
Return type:

None

class Pose(
position=None,
quaternion=None,
rotation=None,
batch_size=1,
name='ee_link',
normalize_rotation=False,
)

Bases: Sequence

Pose representation used in CuRobo. You can initialize a pose by calling pose = Pose(position, quaternion).

Parameters:
position: torch.Tensor | None = None

Position is represented as x, y, z, in meters

quaternion: torch.Tensor | None = None

Quaternion is represented as w, x, y, z.

rotation: torch.Tensor | None = None

Rotation represents orientation as a 3x3 rotation matrix.

batch_size: int = 1

Batch size will be initialized from input.

name: str = 'ee_link'

name of link that this pose represents.

normalize_rotation: bool = False

quaternion input will be normalized when this flag is enabled. This is recommended when a pose comes from an external source as some programs do not send normalized quaternions.

detach()
requires_grad_(requires_grad)
Parameters:

requires_grad (bool)

property batch
property device
property ndim
static from_matrix(matrix)
Parameters:

matrix (numpy.ndarray | torch.Tensor)

classmethod from_euler_xyz(
euler_xyz,
position=None,
)

Create a Pose from XYZ Euler angles and optional position.

This uses the extrinsic XYZ convention: rotations are applied around fixed world axes in order X, Y, Z.

Parameters:
  • euler_xyz (Tensor) – […, 3] Euler angles [rx, ry, rz] in radians.

  • position (Optional[Tensor]) – Optional […, 3] position. If None, defaults to zero.

Return type:

Pose

Returns:

Pose with the specified position (or zero) and the corresponding quaternion.

classmethod from_euler_xyz_intrinsic(
euler_xyz,
position=None,
)

Create a Pose from XYZ Euler angles using intrinsic convention.

Intrinsic XYZ: each rotation is applied around the current body-frame axis (X → Y → Z). This matches URDF virtual base joint chains where successive revolute joints rotate in the parent frame.

See from_euler_xyz for the extrinsic convention.

Parameters:
  • euler_xyz (Tensor) – […, 3] Euler angles [rx, ry, rz] in radians.

  • position (Optional[Tensor]) – Optional […, 3] position. If None, defaults to zero.

Return type:

Pose

Returns:

Pose with the specified position (or zero) and the corresponding quaternion.

get_rotation_matrix()
get_rotation()
stack(other_pose)
Parameters:

other_pose (curobo._src.types.pose.Pose)

repeat(n)

Repeat pose

Parameters:

n (int) – Number of repetitions. (NOTE: original docstring contained unfilled placeholders; verify against implementation.)

unsqueeze(dim=-1)
squeeze(dim=-1)
repeat_seeds(num_seeds)
Parameters:

num_seeds (int)

get_index(b, n=None)
Return type:

Pose

Parameters:
apply_kernel(kernel_mat)
classmethod from_numpy(
position,
quaternion,
device_cfg=DeviceCfg(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32),
)
Parameters:
classmethod from_list(
pose,
device_cfg=DeviceCfg(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32),
q_xyzw=False,
)
Parameters:
classmethod from_batch_list(
pose,
device_cfg=DeviceCfg(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32),
q_xyzw=False,
)
Parameters:
to_list(q_xyzw=False)
tolist(q_xyzw=False)
clone()
to(device_cfg=None, device=None)
Parameters:
get_matrix(out_matrix=None)
Parameters:

out_matrix (torch.Tensor | None)

get_affine_matrix(out_matrix=None)
Parameters:

out_matrix (torch.Tensor | None)

get_numpy_affine_matrix()
get_numpy_matrix()
inverse()

Inverse of pose

Returns:

inverse pose

Return type:

Pose

get_pose_vector()
copy_(pose)

Copies pose data from another memory buffer. This will create a new instance if buffers are not the same shape

Parameters:

pose (Pose) – _description_

static cat(pose_list)
Parameters:

pose_list (List[curobo._src.types.pose.Pose])

distance(
other_pose,
use_phi3=False,
)
Parameters:
angular_distance(
other_pose,
use_phi3=False,
)

This function computes the angular distance using either phi_3 or axis-angle.

See Huynh, Du Q. “Metrics for 3D rotations: Comparison and analysis.” Journal of Mathematical Imaging and Vision 35 (2009): 155-164 for phi_3 metric.

Parameters:
Returns:

Angular distance in range [0,1]. Shape: […]

Return type:

torch.Tensor

linear_distance(other_pose)
Parameters:

other_pose (curobo._src.types.pose.Pose)

multiply(
other_pose,
out_position=None,
out_quaternion=None,
)
Parameters:
transform_point(
points,
out_buffer=None,
gp_out=None,
gq_out=None,
gpt_out=None,
)
Parameters:
transform_points(
points,
out_buffer=None,
gp_out=None,
gq_out=None,
gpt_out=None,
)
Parameters:
batch_transform_points(
points,
out_buffer=None,
gp_out=None,
gq_out=None,
gpt_out=None,
)
Parameters:
batch_transform_points_inverse(
points,
out_buffer=None,
gp_out=None,
gq_out=None,
gpt_out=None,
)
Parameters:
property shape
compute_offset_pose(offset)
Return type:

Pose

Parameters:

offset (curobo._src.types.pose.Pose)

compute_local_pose(world_pose)
Return type:

Pose

Parameters:

world_pose (curobo._src.types.pose.Pose)

contiguous()
Return type:

Pose

__init__(
position=None,
quaternion=None,
rotation=None,
batch_size=1,
name='ee_link',
normalize_rotation=False,
)
Parameters:
Return type:

None

class ToolPose(
tool_frames,
position,
quaternion,
)

Bases: Sequence

4D FK output: [batch, horizon, num_links, 3/4].

No goalset dimension; this represents the current state of the robot, not a goal target.

Parameters:
tool_frames: List[str]

Link/frame names, length must equal position.shape[2].

position: torch.Tensor

Position tensor [batch, horizon, num_links, 3].

quaternion: torch.Tensor

Quaternion tensor [batch, horizon, num_links, 4] (wxyz).

property batch_size: int
property horizon: int
property shape
property ndim
property device

Extract a single link as a 2D Pose [B*H, 3/4].

Return type:

Pose

Parameters:
  • link_name (str)

  • make_contiguous (bool)

to_dict(make_contiguous=True)

Convert to dictionary mapping link names to 2D Poses.

Return type:

Dict[str, Pose]

Parameters:

make_contiguous (bool)

copy_(other)
Parameters:

other (curobo._src.types.tool_pose.ToolPose)

requires_grad_(requires_grad)
Parameters:

requires_grad (bool)

clone()
Return type:

ToolPose

detach()
Return type:

ToolPose

contiguous()
Return type:

ToolPose

Reorder links. Returns a new ToolPose.

Return type:

ToolPose

Parameters:

ordered_tool_frames (List[str])

as_goal(
ordered_tool_frames=None,
)

Convert to GoalToolPose by adding a goalset dimension (num_goalset=1).

Parameters:

ordered_tool_frames (Optional[List[str]]) – Optionally reorder/select a subset of links.

Return type:

GoalToolPose

Returns:

GoalToolPose [B, H, L, 1, 3/4].

__init__(
tool_frames,
position,
quaternion,
)
Parameters:
Return type:

None

class GoalToolPose(
tool_frames,
position,
quaternion,
)

Bases: Sequence

5D goal specification: [batch, horizon, num_links, num_goalset, 3/4].

Used only on the goal/target side. horizon=1 for static goals, horizon>1 for per-timestep targets.

Parameters:
tool_frames: List[str]

Link/frame names, length must equal position.shape[2].

position: torch.Tensor

Position tensor [batch, horizon, num_links, num_goalset, 3].

quaternion: torch.Tensor

Quaternion tensor [batch, horizon, num_links, num_goalset, 4] (wxyz).

property batch_size: int
property horizon: int
property num_goalset: int
property shape
property ndim
property device
classmethod from_poses(
pose_dict,
ordered_tool_frames=None,
num_goalset=1,
)

Build from per-link Pose objects.

Each Pose has position [batch, 3] or [batch * num_goalset, 3].

Return type:

GoalToolPose

Returns:

GoalToolPose [batch, 1, num_links, num_goalset, 3/4] (horizon=1).

Parameters:

Extract a single link as a 2D Pose [B*H*G, 3/4].

Return type:

Pose

Parameters:
  • link_name (str)

  • make_contiguous (bool)

to_dict(
make_contiguous=True,
)
Return type:

Dict[str, Pose]

Parameters:

make_contiguous (bool)

copy_(other)
Parameters:

other (curobo._src.types.tool_pose.GoalToolPose)

requires_grad_(
requires_grad,
)
Parameters:

requires_grad (bool)

clone()
Return type:

GoalToolPose

detach()
Return type:

GoalToolPose

Return type:

GoalToolPose

Parameters:

ordered_tool_frames (List[str])

__init__(
tool_frames,
position,
quaternion,
)
Parameters:
Return type:

None

class ToolPoseCriteria(
terminal_pose_axes_weight_factor=None,
non_terminal_pose_axes_weight_factor=None,
terminal_pose_convergence_tolerance=None,
non_terminal_pose_convergence_tolerance=None,
project_distance_to_goal=False,
device_cfg=DeviceCfg(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32),
)

Bases: object

Criteria for a link pose.

This class is used to define the nature of the cost between the current pose and the goal pose. This is used as part of the goalset cost term.

Parameters:
terminal_pose_axes_weight_factor: torch.Tensor | List[float] | None = None

Factor vector that scales each axis (x,y,z,roll,pitch,yaw) of the terminal position and orientation. This is multiplied with the weight.

non_terminal_pose_axes_weight_factor: torch.Tensor | List[float] | None = None

Factor vector that scales each axis (x,y,z,roll,pitch,yaw) of the non-terminal position and orientation. This is multiplied with the weight.

terminal_pose_convergence_tolerance: torch.Tensor | List[float] | None = None

Convergence tolerance for the terminal position and orientation. This should be of shape (2,). Position unit is meter and orientation unit is radian.

non_terminal_pose_convergence_tolerance: torch.Tensor | List[float] | None = None

Convergence tolerance for the non-terminal position and orientation. This should be of shape (2,). Position unit is meter and orientation unit is radian.

project_distance_to_goal: torch.Tensor | bool = False

If true, the distance is computed after projecting the current pose to the goal frame.

device_cfg: curobo._src.types.device_cfg.DeviceCfg = DeviceCfg(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32)
clone()
copy_(other)
Parameters:

other (curobo._src.cost.tool_pose_criteria.ToolPoseCriteria)

static track_position(
xyz=[1.0, 1.0, 1.0],
)
Parameters:

xyz (List[float])

static track_orientation(
rpy=[0.001, 0.001, 0.001],
non_terminal_scale=1.0,
)
Parameters:
static track_position_and_orientation(
xyz=[1.0, 1.0, 1.0],
rpy=[1.0, 1.0, 1.0],
non_terminal_scale=0.1,
)
Parameters:
static linear_motion(
axis='z',
non_terminal_scale=1.0,
project_distance_to_goal=True,
)
Parameters:
  • axis (str)

  • non_terminal_scale (float)

  • project_distance_to_goal (bool)

static disabled()

Create criteria that disables pose tracking for this tool frame.

Use this when you want to include a tool frame in the solver but not apply any pose cost to it.

Returns:

ToolPoseCriteria with all weight factors set to zero.

__init__(
terminal_pose_axes_weight_factor=None,
non_terminal_pose_axes_weight_factor=None,
terminal_pose_convergence_tolerance=None,
non_terminal_pose_convergence_tolerance=None,
project_distance_to_goal=False,
device_cfg=DeviceCfg(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32),
)
Parameters:
Return type:

None

class CameraObservation(
name='camera_image',
rgb_image=None,
depth_image=None,
image_segmentation=None,
projection_matrix=None,
projection_rays=None,
resolution=None,
pose=None,
intrinsics=None,
timestamp=None,
depth_to_meter=0.001,
feature_grid=None,
)

Bases: object

Parameters:
name: str = 'camera_image'
rgb_image: torch.Tensor | None = None

rgb image format is BxHxWxchannels

depth_image: torch.Tensor | None = None
image_segmentation: torch.Tensor | None = None
projection_matrix: torch.Tensor | None = None
projection_rays: torch.Tensor | None = None
resolution: List[int] | None = None
pose: curobo._src.types.pose.Pose | None = None
intrinsics: torch.Tensor | None = None

Batch of intrinsics matrices of shape (b, 3, 3). contains the following fields: [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]

Type:

intrinsics_matrix

timestamp: torch.Tensor | None = None
depth_to_meter: float = 0.001
feature_grid: torch.Tensor | None = None

Optional neural feature grid. Shape (num_cameras, feature_H, feature_W, feature_dim) float16, channels-last with stride 1 on the channel dim. Dense per-pixel features are represented by feature_H == image_H and feature_W == image_W. Consumed by the mapper’s per-block feature integration kernel when feature_dim > 0.

Magnitude convention: per-element values must be O(1) (e.g. L2-normalized embeddings). The mapper stores per-block feature accumulators as fp16 with a post-frame weight cap; inputs well outside this range can overflow the per-thread footprint sum within a single frame before the cap engages.

filter_depth(
distance=0.01,
)
Parameters:

distance (float)

property shape
copy_(new_data)
Parameters:

new_data (curobo._src.types.camera.CameraObservation)

clone()
to(device)
Parameters:

device (torch.device)

get_pointcloud(
project_to_pose=False,
)
Parameters:

project_to_pose (bool)

extract_depth_from_structured_pointcloud(
pointcloud,
output_image=None,
)

Extract depth image from structured pointcloud.

This function assumes the pointcloud maintains the spatial grid structure [batch, height, width, 3] where Z-axis represents depth values.

Important: This only works for structured pointclouds in camera frame where Z-axis is aligned with depth. If the pointcloud has been transformed to world frame (e.g., via self.pose), this will NOT work correctly.

Parameters:
  • pointcloud – Structured pointcloud in camera frame. Shape: [h, w, 3] or [b, h, w, 3].

  • output_image (Optional[Tensor]) – Optional pre-allocated output tensor. Shape: [h, w] or [b, h, w].

Returns:

Depth image extracted from Z-coordinates. Shape: [b, h, w]

Return type:

torch.Tensor

update_projection_rays()
stack(
new_observation,
dim=0,
)
Parameters:
save_to_file(
file_path,
)
Parameters:

file_path (str)

__init__(
name='camera_image',
rgb_image=None,
depth_image=None,
image_segmentation=None,
projection_matrix=None,
projection_rays=None,
resolution=None,
pose=None,
intrinsics=None,
timestamp=None,
depth_to_meter=0.001,
feature_grid=None,
)
Parameters:
Return type:

None

class ContentPath(
robot_config_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/configs/robot'),
robot_xrdf_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/configs/robot'),
robot_urdf_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/assets'),
robot_asset_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/assets'),
scene_config_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/configs/scene'),
world_asset_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/assets'),
robot_config_absolute_path=None,
robot_xrdf_absolute_path=None,
robot_urdf_absolute_path=None,
robot_asset_absolute_path=None,
scene_config_absolute_path=None,
robot_config_file=None,
robot_xrdf_file=None,
robot_urdf_file=None,
robot_asset_subroot_path=None,
scene_config_file=None,
)

Bases: object

Dataclass to store root path of configuration and assets.

Parameters:
  • robot_config_root_path (str)

  • robot_xrdf_root_path (str)

  • robot_urdf_root_path (str)

  • robot_asset_root_path (str)

  • scene_config_root_path (str)

  • world_asset_root_path (str)

  • robot_config_absolute_path (str | None)

  • robot_xrdf_absolute_path (str | None)

  • robot_urdf_absolute_path (str | None)

  • robot_asset_absolute_path (str | None)

  • scene_config_absolute_path (str | None)

  • robot_config_file (str | None)

  • robot_xrdf_file (str | None)

  • robot_urdf_file (str | None)

  • robot_asset_subroot_path (str | None)

  • scene_config_file (str | None)

robot_config_root_path: str = PosixPath('/home/runner/work/curobo/curobo/curobo/content/configs/robot')

Root path for robot configuration file, either xrdf or yml.

robot_xrdf_root_path: str = PosixPath('/home/runner/work/curobo/curobo/curobo/content/configs/robot')

Root path for robot XRDF.

robot_urdf_root_path: str = PosixPath('/home/runner/work/curobo/curobo/curobo/content/assets')

Root path for robot URDF.

robot_asset_root_path: str = PosixPath('/home/runner/work/curobo/curobo/curobo/content/assets')

Root path for robot meshes and textures.

scene_config_root_path: str = PosixPath('/home/runner/work/curobo/curobo/curobo/content/configs/scene')

Root path for world description files (yml).

world_asset_root_path: str = PosixPath('/home/runner/work/curobo/curobo/curobo/content/assets')

Root path for world assets (meshes, nvblox maps).

robot_config_absolute_path: str | None = None

Absolute path to the robot configuration file. If this is provided, the :var:`robot_config_root_path` will be ignored.

robot_xrdf_absolute_path: str | None = None

Absolute path to the robot XRDF file. If this is provided, the :var:`robot_xrdf_root_path` will be ignored.

robot_urdf_absolute_path: str | None = None

Absolute path to the robot URDF file. If this is provided, the :var:`robot_urdf_root_path` will be ignored.

robot_asset_absolute_path: str | None = None

Absolute path to the robot assets. If this is provided, the :var:`robot_asset_root_path` will be ignored.

scene_config_absolute_path: str | None = None

Absolute path to the world description file. If this is provided, the :var:`scene_config_root_path` will be ignored.

robot_config_file: str | None = None

Relative path to the robot configuration file. If this is provided, the robot_config_absolute_path is initialized with the concatenation of robot_config_root_path and robot_config_file.

robot_xrdf_file: str | None = None

Relative path to the robot XRDF file. If this is provided, the robot_xrdf_absolute_path is initialized with the concatenation of robot_xrdf_root_path and robot_xrdf_file.

robot_urdf_file: str | None = None

Relative path to the robot URDF file. If this is provided, the robot_urdf_absolute_path is initialized with the concatenation of robot_urdf_root_path and robot_urdf_file.

robot_asset_subroot_path: str | None = None

Relative path to the robot assets.

scene_config_file: str | None = None

Relative path to the world description file. If this is provided, the scene_config_absolute_path is initialized with the concatenation of scene_config_root_path and scene_config_file.

get_robot_configuration_path()

Get the robot configuration path.

__init__(
robot_config_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/configs/robot'),
robot_xrdf_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/configs/robot'),
robot_urdf_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/assets'),
robot_asset_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/assets'),
scene_config_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/configs/scene'),
world_asset_root_path=PosixPath('/home/runner/work/curobo/curobo/curobo/content/assets'),
robot_config_absolute_path=None,
robot_xrdf_absolute_path=None,
robot_urdf_absolute_path=None,
robot_asset_absolute_path=None,
scene_config_absolute_path=None,
robot_config_file=None,
robot_xrdf_file=None,
robot_urdf_file=None,
robot_asset_subroot_path=None,
scene_config_file=None,
)
Parameters:
  • robot_config_root_path (str)

  • robot_xrdf_root_path (str)

  • robot_urdf_root_path (str)

  • robot_asset_root_path (str)

  • scene_config_root_path (str)

  • world_asset_root_path (str)

  • robot_config_absolute_path (str | None)

  • robot_xrdf_absolute_path (str | None)

  • robot_urdf_absolute_path (str | None)

  • robot_asset_absolute_path (str | None)

  • scene_config_absolute_path (str | None)

  • robot_config_file (str | None)

  • robot_xrdf_file (str | None)

  • robot_urdf_file (str | None)

  • robot_asset_subroot_path (str | None)

  • scene_config_file (str | None)

Return type:

None

class DeviceCfg(
device=device(type='cuda', index=0),
dtype=torch.float32,
collision_geometry_dtype=torch.float32,
collision_gradient_dtype=torch.float32,
collision_distance_dtype=torch.float32,
)

Bases: object

Configuration for device and data types used in tensor operations.

Parameters:
device: torch.device = device(type='cuda', index=0)
dtype: torch.dtype = torch.float32
collision_geometry_dtype: torch.dtype = torch.float32
collision_gradient_dtype: torch.dtype = torch.float32
collision_distance_dtype: torch.dtype = torch.float32
static from_basic(device, dev_id)
Parameters:
to_device(data_tensor)
to_int8_device(data_tensor)
cpu()
as_torch_dict()
is_same_torch_device(other)

Check if a torch.device refers to the same physical device as this config.

Handles the case where "cuda" and "cuda:0" are equivalent devices.

Parameters:

other (device) – torch.device to compare against.

Return type:

bool

Returns:

True if devices refer to the same physical device.

__init__(
device=device(type='cuda', index=0),
dtype=torch.float32,
collision_geometry_dtype=torch.float32,
collision_gradient_dtype=torch.float32,
collision_distance_dtype=torch.float32,
)
Parameters:
Return type:

None