chore: replace hard-coded OBS values with constants throughout the source code (#2037)

* chore: replace hard-coded OBS values with constants throughout all the source code
* chore(tests): replace hard-coded OBS values with constants throughout all the test code
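For context, the constants imported in the hunks below stand in for the string keys they replace. A minimal sketch of the relevant definitions in lerobot/utils/constants.py; the OBS_STATE and OBS_IMAGE values are inferred directly from the strings the diff replaces, while the OBS_LANGUAGE value is an assumption by analogy:

    # Sketch of the relevant constants. OBS_STATE and OBS_IMAGE values are
    # taken from the strings the hunks below replace; OBS_LANGUAGE is
    # assumed by analogy and may differ in the actual module.
    OBS_STATE = "observation.state"
    OBS_IMAGE = "observation.image"
    OBS_LANGUAGE = "observation.language"  # assumed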
@@ -11,7 +11,7 @@ import torch
 from lerobot.configs.types import FeatureType, PipelineFeatureType, PolicyFeature
 from lerobot.processor import DataProcessorPipeline, TokenizerProcessorStep, TransitionKey
 from lerobot.processor.converters import create_transition, identity_transition
-from lerobot.utils.constants import OBS_LANGUAGE
+from lerobot.utils.constants import OBS_IMAGE, OBS_LANGUAGE, OBS_STATE
 from tests.utils import require_package

@@ -503,16 +503,14 @@ def test_features_basic():
     processor = TokenizerProcessorStep(tokenizer=mock_tokenizer, max_length=128)

     input_features = {
-        PipelineFeatureType.OBSERVATION: {
-            "observation.state": PolicyFeature(type=FeatureType.STATE, shape=(10,))
-        },
+        PipelineFeatureType.OBSERVATION: {OBS_STATE: PolicyFeature(type=FeatureType.STATE, shape=(10,))},
         PipelineFeatureType.ACTION: {"action": PolicyFeature(type=FeatureType.ACTION, shape=(5,))},
     }

     output_features = processor.transform_features(input_features)

     # Check that original features are preserved
-    assert "observation.state" in output_features[PipelineFeatureType.OBSERVATION]
+    assert OBS_STATE in output_features[PipelineFeatureType.OBSERVATION]
     assert "action" in output_features[PipelineFeatureType.ACTION]

     # Check that tokenized features are added
@@ -797,7 +795,7 @@ def test_device_detection_cpu():
     processor = TokenizerProcessorStep(tokenizer=mock_tokenizer, max_length=10)

     # Create transition with CPU tensors
-    observation = {"observation.state": torch.randn(10)}  # CPU tensor
+    observation = {OBS_STATE: torch.randn(10)}  # CPU tensor
     action = torch.randn(5)  # CPU tensor
     transition = create_transition(
         observation=observation, action=action, complementary_data={"task": "test task"}
@@ -821,7 +819,7 @@ def test_device_detection_cuda():
     processor = TokenizerProcessorStep(tokenizer=mock_tokenizer, max_length=10)

     # Create transition with CUDA tensors
-    observation = {"observation.state": torch.randn(10).cuda()}  # CUDA tensor
+    observation = {OBS_STATE: torch.randn(10).cuda()}  # CUDA tensor
     action = torch.randn(5).cuda()  # CUDA tensor
     transition = create_transition(
         observation=observation, action=action, complementary_data={"task": "test task"}
@@ -847,7 +845,7 @@ def test_device_detection_multi_gpu():

     # Test with tensors on cuda:1
     device = torch.device("cuda:1")
-    observation = {"observation.state": torch.randn(10).to(device)}
+    observation = {OBS_STATE: torch.randn(10).to(device)}
     action = torch.randn(5).to(device)
     transition = create_transition(
         observation=observation, action=action, complementary_data={"task": "multi gpu test"}
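The three device-detection hunks (CPU, CUDA, multi-GPU) exercise the same behavior: the processor infers the device of the incoming observation tensors and places its tokenized outputs there. An illustrative helper showing that inference pattern, not the lerobot implementation, with a hypothetical function name:

    import torch

    def detect_observation_device(observation: dict) -> torch.device:
        # Return the device of the first tensor found in the observation,
        # falling back to CPU when the observation holds no tensors.
        for value in observation.values():
            if isinstance(value, torch.Tensor):
                return value.device
        return torch.device("cpu")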
@@ -943,7 +941,7 @@ def test_device_detection_preserves_dtype():
     processor = TokenizerProcessorStep(tokenizer=mock_tokenizer, max_length=10)

     # Create transition with float tensor (to test dtype isn't affected)
-    observation = {"observation.state": torch.randn(10, dtype=torch.float16)}
+    observation = {OBS_STATE: torch.randn(10, dtype=torch.float16)}
     transition = create_transition(observation=observation, complementary_data={"task": "dtype test"})

     result = processor(transition)
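The dtype hunk above checks that producing tokenizer outputs alongside a float16 observation leaves the observation's dtype alone. A tiny sketch of the property being asserted, in plain PyTorch with hypothetical names:

    import torch

    obs = torch.randn(10, dtype=torch.float16)
    # New integer tensors (e.g., token ids) are created on obs's device
    # without ever casting the observation itself.
    token_ids = torch.zeros(8, dtype=torch.long, device=obs.device)
    assert obs.dtype == torch.float16  # observation dtype is untouched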
@@ -977,7 +975,7 @@ def test_integration_with_device_processor(mock_auto_tokenizer):

     # Start with CPU tensors
     transition = create_transition(
-        observation={"observation.state": torch.randn(10)},  # CPU
+        observation={OBS_STATE: torch.randn(10)},  # CPU
         action=torch.randn(5),  # CPU
         complementary_data={"task": "pipeline test"},
     )
@@ -985,7 +983,7 @@ def test_integration_with_device_processor(mock_auto_tokenizer):
     result = robot_processor(transition)

     # All tensors should end up on CUDA (moved by DeviceProcessorStep)
-    assert result[TransitionKey.OBSERVATION]["observation.state"].device.type == "cuda"
+    assert result[TransitionKey.OBSERVATION][OBS_STATE].device.type == "cuda"
     assert result[TransitionKey.ACTION].device.type == "cuda"

     # Tokenized tensors should also be on CUDA
@@ -1005,8 +1003,8 @@ def test_simulated_accelerate_scenario():
     # Simulate Accelerate scenario: batch already on GPU
     device = torch.device("cuda:0")
     observation = {
-        "observation.state": torch.randn(1, 10).to(device),  # Batched, on GPU
-        "observation.image": torch.randn(1, 3, 224, 224).to(device),  # Batched, on GPU
+        OBS_STATE: torch.randn(1, 10).to(device),  # Batched, on GPU
+        OBS_IMAGE: torch.randn(1, 3, 224, 224).to(device),  # Batched, on GPU
     }
     action = torch.randn(1, 5).to(device)  # Batched, on GPU

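Taken together, the hunks standardize on importing key constants instead of hand-typed "observation.*" strings, so a typo fails at import time instead of surfacing as a silent key mismatch. The resulting call-site pattern, using only the imports the diff itself introduces:

    import torch
    from lerobot.utils.constants import OBS_IMAGE, OBS_STATE

    # Build an observation dict keyed by the shared constants.
    observation = {
        OBS_STATE: torch.randn(1, 10),
        OBS_IMAGE: torch.randn(1, 3, 224, 224),
    }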