Compare commits
666 Commits
user/mruss...user/miche
| Author | SHA1 | Date |
|---|---|---|
| | 134202011c | |
| | fddfafc487 | |
| | 906dd1396d | |
| | 64219571e4 | |
| | 3f61ec1d69 | |
| | 00e9f61509 | |
| | 8d4fe1ad6a | |
| | 6eeab64f8a | |
| | 8feda920da | |
| | 1edfbf792a | |
| | df96e5b3b2 | |
| | e044208534 | |
| | 2f62e5496e | |
| | 2475645f5f | |
| | ba477e81ce | |
| | ab85147296 | |
| | 05fcfca374 | |
| | b166296ba5 | |
| | adb1d08cc2 | |
| | 520cc69534 | |
| | 1df2a7b2da | |
| | fa72aed5b6 | |
| | 1a936113c2 | |
| | a5f758d7c6 | |
| | 5902f8fcc7 | |
| | f8a963b86f | |
| | 42b0efdd99 | |
| | 335c92c961 | |
| | 7694d03dee | |
| | aa793cbd4a | |
| | db86586530 | |
| | dd9c35ba78 | |
| | f96c50a4e2 | |
| | 7b45c56be5 | |
| | f762e2758f | |
| | 35743b72de | |
| | 5823657e38 | |
| | 3ba8b27680 | |
| | 0061c217bd | |
| | 53fc441a8a | |
| | 049773a5fa | |
| | e76f29ff7a | |
| | adcb07bf62 | |
| | 67e3383ffc | |
| | 1537d0ab90 | |
| | ac5a9b90c7 | |
| | f35d24a9c3 | |
| | fbdefb2e3e | |
| | 0e39d0f6e6 | |
| | 2be7f3a3ff | |
| | 0cf864870c | |
| | b8eecba63d | |
| | 7308aa57a2 | |
| | 1fd3b2e2db | |
| | 69e48bbe19 | |
| | 0db1a67eaf | |
| | ccb8468e9b | |
| | 1786916a16 | |
| | 0507ad4f68 | |
| | f6198d20c6 | |
| | 78e29f4f20 | |
| | fb4bfaf029 | |
| | 809a9c6de0 | |
| | bed90e3a41 | |
| | f4c11593d4 | |
| | 71e6520cd1 | |
| | a5f15db057 | |
| | edec51988d | |
| | ddca6765b8 | |
| | 6163daaaa4 | |
| | cedaa83bce | |
| | 4bb965c283 | |
| | 4feaef3436 | |
| | e9aac40ba8 | |
| | 386ad61007 | |
| | cac4289619 | |
| | 0bda18eab5 | |
| | 8e2a394442 | |
| | 8ab2227148 | |
| | 9dab08dfbc | |
| | 05dfa26c54 | |
| | edbba48e81 | |
| | 10119c1a59 | |
| | c7ef189da0 | |
| | 51efe6dfee | |
| | b0592d9bc8 | |
| | 363fe64ff9 | |
| | bbcb12e919 | |
| | 3e87b09d34 | |
| | 81de27dc9a | |
| | eb94a5f03f | |
| | 742708942c | |
| | 5a2f9b6589 | |
| | 06f0c579b7 | |
| | 7890767d34 | |
| | d322cb0220 | |
| | f011173ff6 | |
| | 20129cd4c2 | |
| | 307823bc8d | |
| | a445d9c9da | |
| | f24030d4d8 | |
| | 7598aeaad7 | |
| | 4485cc0b5b | |
| | 64303781c2 | |
| | dd3e305164 | |
| | cb9cac6a1b | |
| | 95f9b45418 | |
| | f9db727647 | |
| | 230c7fdfab | |
| | 320f7e8450 | |
| | 08fbbb318f | |
| | 8b98399206 | |
| | 237b14a6ec | |
| | 2e705ff554 | |
| | d72a3f9c32 | |
| | 73ac4f38b2 | |
| | a0e69dd708 | |
| | b207babd9e | |
| | 293870d0f6 | |
| | 87a8cb6d89 | |
| | 69dc3f5c9c | |
| | e4d4754600 | |
| | 5998203a33 | |
| | 8cfab38824 | |
| | 6fa7df35df | |
| | ee5525fea1 | |
| | a1daeaf0c4 | |
| | fb7c288c94 | |
| | 2e528a8b12 | |
| | 6d723c45a9 | |
| | 674e784aa9 | |
| | 42bf1e8b9d | |
| | 4257fe5045 | |
| | ea89b29fe5 | |
| | 50e9a8ed6a | |
| | 1d4f660075 | |
| | bd4db8d747 | |
| | a8da4a347e | |
| | b8c2b0bb93 | |
| | c58b504a9e | |
| | a75d00970f | |
| | 671ac3411f | |
| | 299effe0f1 | |
| | 4df18de636 | |
| | 8dc69c6126 | |
| | 7d481e6048 | |
| | a0018240d5 | |
| | cf03ca930f | |
| | ecc960bf8a | |
| | b77cee7cc6 | |
| | 5231752487 | |
| | 4ce3362724 | |
| | 6230840397 | |
| | c5845ee203 | |
| | 0030ff3f74 | |
| | dc726cb9a3 | |
| | b7a9b0689a | |
| | a7a51cfc9c | |
| | 0d70f0b85c | |
| | c1ee25d9f7 | |
| | 9886520d33 | |
| | 3b24ad3c84 | |
| | 54c3c6d684 | |
| | fb92935601 | |
| | dcd850feab | |
| | 1ce368503d | |
| | fb075a709d | |
| | 3424644ecd | |
| | c37936f2c9 | |
| | c5382a450c | |
| | 2f7339b410 | |
| | 9e5f254db0 | |
| | 8122721f6d | |
| | 5c352ae558 | |
| | 9386892f8e | |
| | 267a837a2c | |
| | 28b595c651 | |
| | 9fd4c21d4d | |
| | 02e1ed0bfb | |
| | e18274bc9a | |
| | 68c271ad25 | |
| | a3ada81816 | |
| | 203315d378 | |
| | 78c640b6d8 | |
| | d5a87f67cf | |
| | 8bcf41761d | |
| | 1efaf02df9 | |
| | cf58890bb0 | |
| | 7c2c67fc3c | |
| | 70130b9841 | |
| | 6167886472 | |
| | f9fb9d4594 | |
| | d86d29fe21 | |
| | f83d215e7a | |
| | 7361a11a4d | |
| | 0cce2fe0fa | |
| | 88d26ae976 | |
| | 3a2308d86f | |
| | fdd04efdb7 | |
| | ff18be18ad | |
| | 427720426b | |
| | 66693965c0 | |
| | 334cf8143e | |
| | 5b49601072 | |
| | 0185a0b6fd | |
| | 70d418935d | |
| | eb44a06a9b | |
| | 8eb3c1510c | |
| | 4d5ecb082e | |
| | 6e687e2910 | |
| | eb710647bf | |
| | 176557d770 | |
| | 3beab33fac | |
| | c0ba4b4954 | |
| | 8fb373aeb2 | |
| | 5a0ee06651 | |
| | 05a237ce10 | |
| | 88cc2b8fc8 | |
| | b69132c79d | |
| | db897a1619 | |
| | 0b5b62c8fb | |
| | 056f79d358 | |
| | 114ec644d0 | |
| | 26ee8b6ae5 | |
| | 38e8864284 | |
| | 80d566eb56 | |
| | bb5a95889f | |
| | 0ea27704f6 | |
| | 2abbd60a0d | |
| | 1c8daf11fd | |
| | cdcf346061 | |
| | 42f95e827d | |
| | 618ed00d45 | |
| | 50d8db481e | |
| | e4a5971ffd | |
| | 36f9ccd851 | |
| | 787aee0e60 | |
| | 0341a38fdd | |
| | ffbed4a141 | |
| | 03fe0f054b | |
| | fd74c194b6 | |
| | 0959694bab | |
| | 7b01e16439 | |
| | 66816fd871 | |
| | 599326508f | |
| | 2f04d0d2b9 | |
| | e002c5ec56 | |
| | 3dfb37e976 | |
| | b6a2200983 | |
| | 85fe8a3f4e | |
| | bb69cb3c8c | |
| | ae51c19b3c | |
| | 9ea79f8a76 | |
| | 1d4ec50a58 | |
| | 4c73891575 | |
| | d3b84ecd6f | |
| | e1d55c7a44 | |
| | 85242cac67 | |
| | 0d88a5ee09 | |
| | 62e237bdee | |
| | c85f88fb62 | |
| | a90f4872f2 | |
| | a16ea283f5 | |
| | 8209a6dfb7 | |
| | b5fbeb7401 | |
| | 2ac25b02e2 | |
| | 39fe4b1301 | |
| | 140e30e386 | |
| | ddcc0415e4 | |
| | 5195f40fd3 | |
| | 98c6557869 | |
| | ee820859d3 | |
| | 5d6879d93a | |
| | fae47d58d3 | |
| | 3a07301365 | |
| | f1af97dc9c | |
| | f2266101df | |
| | 9784d8a47f | |
| | af769abd8d | |
| | 12c13e320e | |
| | 273fa2e6e1 | |
| | d143043037 | |
| | ca45c34ad5 | |
| | b1679050de | |
| | d2c41b35db | |
| | bc7b6d3daf | |
| | 2516101cba | |
| | aebea08a99 | |
| | 03616db82c | |
| | 93c4fc198f | |
| | 8cd44ae163 | |
| | 2ae657f568 | |
| | 508f5d1407 | |
| | c8b1132846 | |
| | ef777993cd | |
| | 760d60ad4b | |
| | 875c0271b7 | |
| | 57344bfde5 | |
| | 46827fb002 | |
| | 2fd78879f6 | |
| | e8449e9630 | |
| | a0e2be8b92 | |
| | 181727c0fe | |
| | d1d6ffd23c | |
| | e5801f467f | |
| | c6ca9523de | |
| | 642e3a3274 | |
| | 146148c48c | |
| | 8f15835daa | |
| | 022bd65125 | |
| | 63d8c96514 | |
| | 4624a836e5 | |
| | ad7eea132d | |
| | 22a1899ff4 | |
| | 17a3a31b5f | |
| | 1a8b99e360 | |
| | 6db2154f28 | |
| | be3adda95f | |
| | 9d48d236c1 | |
| | b57d6a7776 | |
| | d1f76cba8e | |
| | d78cef1fee | |
| | 30a808c0ae | |
| | 4a7f85a6ec | |
| | b6b9635be6 | |
| | 21b1026872 | |
| | 8c3eab32b0 | |
| | 29633865c7 | |
| | 702749b7d3 | |
| | b43ece8934 | |
| | c10c5a0e64 | |
| | a8db91c40e | |
| | 0f5f7ac780 | |
| | bf1c737858 | |
| | d07c7347f8 | |
| | 57e5e4cc07 | |
| | 2743c29a96 | |
| | 2bb73ac431 | |
| | 9afc4b771c | |
| | f71e224023 | |
| | 768e36660d | |
| | 889de7c415 | |
| | 790d6740ba | |
| | 3539251b18 | |
| | 1f210bc8a3 | |
| | d70bc4bde9 | |
| | bdbca09cb2 | |
| | e0b292ab51 | |
| | f960f4d8d4 | |
| | 9e57ec7837 | |
| | 0a7f51f0da | |
| | 4ca92a28e9 | |
| | 0464dc91b3 | |
| | d32daebf75 | |
| | 27cb0c40bd | |
| | 12abc9ca86 | |
| | 4005065223 | |
| | 443fed216c | |
| | 42a87e7211 | |
| | 5322417c03 | |
| | 4041f57943 | |
| | 034171a89a | |
| | 2c86fea78a | |
| | 782dff1163 | |
| | 8924ccbbab | |
| | 792c3d961d | |
| | e998dddcfa | |
| | 437fc29e12 | |
| | aee86b4b18 | |
| | 1c873df5c0 | |
| | 99c0938b42 | |
| | 716029b1e3 | |
| | 3848a8f9aa | |
| | f7672e14c7 | |
| | e393af2d88 | |
| | 0dcb2caba8 | |
| | 4679725957 | |
| | 57319062aa | |
| | 078f59bfd1 | |
| | 36fcea2002 | |
| | 2971bdfed5 | |
| | 28cd3a6f3a | |
| | c0570b3003 | |
| | eeb8490016 | |
| | 854b78975a | |
| | e55d2ffe50 | |
| | 1ebd81552c | |
| | 145fe4cd17 | |
| | 65569ba90e | |
| | 79293800f1 | |
| | bc765f9e95 | |
| | 201311503f | |
| | 8cc0232e73 | |
| | 6bfcc18e73 | |
| | e004247ed4 | |
| | e096754d14 | |
| | 02803f545d | |
| | 8503e8e166 | |
| | d6007c6e7d | |
| | 50963fcf13 | |
| | b568de35ad | |
| | ae9c81ac39 | |
| | 78fd1a1e04 | |
| | 90533e6b9f | |
| | 051a52a4ce | |
| | 2292b514aa | |
| | 1f1a01a798 | |
| | faa476f0d2 | |
| | 5130b69ece | |
| | aed85241b7 | |
| | 21c3ac42ee | |
| | 2d3a5fb2be | |
| | a631e4c11c | |
| | 222d6f104e | |
| | 7a3b424cd3 | |
| | 2c22f7d76d | |
| | af295fadb5 | |
| | 9644e2b086 | |
| | 6ccf083127 | |
| | bb774e7acd | |
| | dcbbeab80b | |
| | b71ac34214 | |
| | a774af2eab | |
| | c237d1379e | |
| | cf963eb1b0 | |
| | 4293b6a4fb | |
| | 725b446ad6 | |
| | 7a75bb9f61 | |
| | 0c1d4cb323 | |
| | c6212d585d | |
| | 7c8ab8e2d6 | |
| | 1de75c46c0 | |
| | 4ad109cff8 | |
| | 8994252019 | |
| | 9832daf08d | |
| | 39d8f45810 | |
| | 30fcd3d417 | |
| | 039b437ef0 | |
| | 7582a0a2b0 | |
| | 25388d0947 | |
| | 7152bc8aa7 | |
| | 5b46dc0b6a | |
| | 4273f1f384 | |
| | 97194bf7f3 | |
| | 0ac026b521 | |
| | ff7cfdaf40 | |
| | 57c97762e1 | |
| | a38bb15e79 | |
| | 3ceaee999d | |
| | d485dc1313 | |
| | 329d103453 | |
| | 9f46a3d8f9 | |
| | c9ca9e4316 | |
| | 5a57e6f4a7 | |
| | a2f5c34625 | |
| | 1f1e1bcfe8 | |
| | e047074825 | |
| | a6015a55f9 | |
| | c2e761437d | |
| | fedac994c3 | |
| | 7d558d058e | |
| | 1d3e1cbdbd | |
| | 0ccc957d5c | |
| | a4d487bc1d | |
| | 8ca03a7255 | |
| | f2ed2bfb2f | |
| | 40675ec76c | |
| | 9e34c1d731 | |
| | 857f335be9 | |
| | fc4a95f187 | |
| | 4fe1880887 | |
| | 6fa859fa19 | |
| | 2abfa5838d | |
| | 3d119c0ccb | |
| | a32081757d | |
| | 56c04ffc53 | |
| | 715d4557af | |
| | 6541982dff | |
| | 43bc9404bb | |
| | 375499c323 | |
| | 17a4447cef | |
| | 287dc13d96 | |
| | 02a1cf6a4e | |
| | 34cd1e47bf | |
| | 74d56834af | |
| | dd80dbb4cd | |
| | bc020ee0a4 | |
| | a15767aff1 | |
| | 9af0a9bf37 | |
| | e2c8bc6948 | |
| | 2c68c6ca40 | |
| | dd1f33e5ed | |
| | 2c1bb766ff | |
| | c1c71fb994 | |
| | 2d56f35071 | |
| | 64ce2669ca | |
| | f39652707c | |
| | f527adf7a9 | |
| | 6a77189f50 | |
| | e4a6d035f9 | |
| | 794f6e00fc | |
| | 97494c6a39 | |
| | 9358d334c7 | |
| | 712d5dae4f | |
| | 952e892fe5 | |
| | e8159997c7 | |
| | 1c15bab70f | |
| | c85a9253e7 | |
| | 8d659a6aa9 | |
| | f6a2396484 | |
| | 7a7af82e35 | |
| | 7f23972f3f | |
| | 3362b665e6 | |
| | eeeccdba53 | |
| | bd5b181dfd | |
| | 858678786a | |
| | 0f972661e1 | |
| | 2e9b144c56 | |
| | fa8ba9e4e2 | |
| | 2037cc0219 | |
| | 5006da72ff | |
| | ad0bacbfe4 | |
| | e33ca2c980 | |
| | 9f0a8a49d0 | |
| | a3cd18eda9 | |
| | f0505e81cc | |
| | 7dc9ffe4c9 | |
| | 0e98c6ee96 | |
| | 1f7ddc1d76 | |
| | ce63cfdb25 | |
| | 974028bd28 | |
| | a36ed39487 | |
| | c37b1d45b6 | |
| | d6f1359e69 | |
| | 2357d4aceb | |
| | f994febca4 | |
| | 12f52632ed | |
| | 8a64d8268b | |
| | d6ccdc222c | |
| | 9bd0788131 | |
| | 1ae62c28f7 | |
| | baf6e66c3d | |
| | a065bd61ae | |
| | 84565c7c2e | |
| | 05b54733da | |
| | 513b008bcc | |
| | 32fffd4bbb | |
| | 03c7cf8a63 | |
| | 074f0ac8fe | |
| | 25c63ccf63 | |
| | 5dc3c74e64 | |
| | 5e9473806c | |
| | 4214b01703 | |
| | b974e5541f | |
| | 10706ed753 | |
| | fd64dc84ae | |
| | 0b8205a8a0 | |
| | 57ae509823 | |
| | 5d24ce3160 | |
| | d694ea1d38 | |
| | a00936686f | |
| | 2feb5edc65 | |
| | b80e55ca44 | |
| | e8ce388109 | |
| | a4c1da25de | |
| | a003e7c081 | |
| | 06988b2135 | |
| | 7ed7570b17 | |
| | e2d13ba7e4 | |
| | f6c1049474 | |
| | 2b24feb604 | |
| | a27411022d | |
| | 3827974b58 | |
| | b299cfea8a | |
| | a13e49073c | |
| | 2c7e0f17b6 | |
| | 418866007e | |
| | 5ab418dbeb | |
| | 95f61ee9d4 | |
| | ac89c8d226 | |
| | d75d904e43 | |
| | ea4d8d990c | |
| | c93cbb8311 | |
| | c0137e89b9 | |
| | 3111ba78ad | |
| | 3d3a176940 | |
| | 212c6095a2 | |
| | bf6f89a5b5 | |
| | 48469ec674 | |
| | c7dfd32b43 | |
| | 8861546ad8 | |
| | 9c1a893ee3 | |
| | e81c36cf74 | |
| | ed83cbd4f2 | |
| | 2a33b9ad87 | |
| | 6e85aa13ec | |
| | af05a1725c | |
| | 800c4a847f | |
| | bba8c4c0d4 | |
| | 68b369e321 | |
| | 8d60ac3ffc | |
| | 731fb6ebaf | |
| | 13e124302f | |
| | 59bdd29106 | |
| | 659ec4434d | |
| | 124829104b | |
| | 21cd2940a9 | |
| | da265ca920 | |
| | a1809ad3de | |
| | 8699a28be0 | |
| | 65db5afe1c | |
| | 75d5fa4604 | |
| | e64fad2224 | |
| | eecf32e77a | |
| | 3354d919fc | |
| | aca464ca72 | |
| | fe483b1d0d | |
| | ddeade077e | |
| | c4c2ce04e7 | |
| | 2cb0bf5d41 | |
| | b86a2c0b47 | |
| | c574eb4984 | |
| | 1e49cc4d60 | |
| | e71095960f | |
| | 90e099b39f | |
| | 334deb985d | |
| | 8548a87bd4 | |
| | 638d411cd3 | |
| | dd974529cf | |
| | 43e079f73e | |
| | 6674e36824 | |
| | ae9605f03c | |
| | 3c0a209f9f | |
| | 1ee1acf8ad | |
| | c4d912a241 | |
| | 4323bdce22 | |
| | 5daa45436d | |
| | 4def6d6ac2 | |
| | d8560b8d5f | |
| | 380b836eee | |
| | eec6796cb8 | |
| | 25a8597680 | |
| | b8b368310c | |
| | 5097cd900e | |
| | bc16e1b497 | |
| | 8f821ecad0 | |
| | 4519016e67 | |
| | 59e2757434 | |
| | 73b64c3089 | |
| | 66f8736598 | |
| | 4c41f6fcc6 | |
| | 44f9b21e74 | |
| | 03f49ceaf0 | |
| | 8e7d6970ea | |
| | 286bca37cc | |
| | a2c181992a | |
| | 32eb0cec8f | |
| | 96c7052777 | |
| | 975c1c25c3 | |
| | 20f466768e | |
| | 8af693548e | |
| | 963738d983 | |
| | e0df56de62 | |
| | 538455a965 | |
| | 172809a502 | |
| | 55e4ff6742 | |

@@ -1,68 +0,0 @@
{
    "homing_offset": [
        2048,
        3072,
        3072,
        -1024,
        -1024,
        2048,
        -2048,
        2048,
        -2048
    ],
    "drive_mode": [
        1,
        1,
        1,
        0,
        0,
        1,
        0,
        1,
        0
    ],
    "start_pos": [
        2015,
        3058,
        3061,
        1071,
        1071,
        2035,
        2152,
        2029,
        2499
    ],
    "end_pos": [
        -1008,
        -1963,
        -1966,
        2141,
        2143,
        -971,
        3043,
        -1077,
        3144
    ],
    "calib_mode": [
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "LINEAR"
    ],
    "motor_names": [
        "waist",
        "shoulder",
        "shoulder_shadow",
        "elbow",
        "elbow_shadow",
        "forearm_roll",
        "wrist_angle",
        "wrist_rotate",
        "gripper"
    ]
}

@@ -1,68 +0,0 @@
{
    "homing_offset": [
        2048,
        3072,
        3072,
        -1024,
        -1024,
        2048,
        -2048,
        2048,
        -1024
    ],
    "drive_mode": [
        1,
        1,
        1,
        0,
        0,
        1,
        0,
        1,
        0
    ],
    "start_pos": [
        2035,
        3024,
        3019,
        979,
        981,
        1982,
        2166,
        2124,
        1968
    ],
    "end_pos": [
        -990,
        -2017,
        -2015,
        2078,
        2076,
        -1030,
        3117,
        -1016,
        2556
    ],
    "calib_mode": [
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "LINEAR"
    ],
    "motor_names": [
        "waist",
        "shoulder",
        "shoulder_shadow",
        "elbow",
        "elbow_shadow",
        "forearm_roll",
        "wrist_angle",
        "wrist_rotate",
        "gripper"
    ]
}

@@ -1,68 +0,0 @@
{
    "homing_offset": [
        2048,
        3072,
        3072,
        -1024,
        -1024,
        2048,
        -2048,
        2048,
        -2048
    ],
    "drive_mode": [
        1,
        1,
        1,
        0,
        0,
        1,
        0,
        1,
        0
    ],
    "start_pos": [
        2056,
        2895,
        2896,
        1191,
        1190,
        2018,
        2051,
        2056,
        2509
    ],
    "end_pos": [
        -1040,
        -2004,
        -2006,
        2126,
        2127,
        -1010,
        3050,
        -1117,
        3143
    ],
    "calib_mode": [
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "LINEAR"
    ],
    "motor_names": [
        "waist",
        "shoulder",
        "shoulder_shadow",
        "elbow",
        "elbow_shadow",
        "forearm_roll",
        "wrist_angle",
        "wrist_rotate",
        "gripper"
    ]
}

@@ -1,68 +0,0 @@
{
    "homing_offset": [
        2048,
        3072,
        3072,
        -1024,
        -1024,
        2048,
        -2048,
        2048,
        -2048
    ],
    "drive_mode": [
        1,
        1,
        1,
        0,
        0,
        1,
        0,
        1,
        0
    ],
    "start_pos": [
        2068,
        3034,
        3030,
        1038,
        1041,
        1991,
        1948,
        2090,
        1985
    ],
    "end_pos": [
        -1025,
        -2014,
        -2015,
        2058,
        2060,
        -955,
        3091,
        -940,
        2576
    ],
    "calib_mode": [
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "DEGREE",
        "LINEAR"
    ],
    "motor_names": [
        "waist",
        "shoulder",
        "shoulder_shadow",
        "elbow",
        "elbow_shadow",
        "forearm_roll",
        "wrist_angle",
        "wrist_rotate",
        "gripper"
    ]
}

.dockerignore
@@ -1,3 +1,17 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Misc
.git
tmp
@@ -59,7 +73,7 @@ pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
!tests/data
!tests/artifacts
htmlcov/
.tox/
.nox/

.gitattributes (vendored): 15 lines changed
@@ -1,6 +1,21 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*.memmap filter=lfs diff=lfs merge=lfs -text
*.stl filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.json !text !filter !merge !diff
tests/artifacts/cameras/*.png filter=lfs diff=lfs merge=lfs -text
*.bag filter=lfs diff=lfs merge=lfs -text

.github/ISSUE_TEMPLATE/bug-report.yml (vendored): 14 lines changed
@@ -1,3 +1,17 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: "\U0001F41B Bug Report"
description: Submit a bug report to help us improve LeRobot
body:

.github/PULL_REQUEST_TEMPLATE.md (vendored): 2 lines changed
@@ -21,7 +21,7 @@ Provide a simple way for the reviewer to try out your changes.

Examples:
```bash
DATA_DIR=tests/data pytest -sx tests/test_stuff.py::test_something
pytest -sx tests/test_stuff.py::test_something
```
```bash
python lerobot/scripts/train.py --some.option=true

.github/workflows/build-docker-images.yml (vendored): 50 lines changed
@@ -1,3 +1,17 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/build_docker_images.yml
name: Builds
@@ -8,6 +22,8 @@ on:
  schedule:
    - cron: "0 1 * * *"

permissions: {}

env:
  PYTHON_VERSION: "3.10"

@@ -24,21 +40,24 @@ jobs:
          git lfs install

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
        with:
          cache-binary: false

      - name: Check out code
        uses: actions/checkout@v4
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          lfs: true
          persist-credentials: false

      - name: Login to DockerHub
        uses: docker/login-action@v3
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}

      - name: Build and Push CPU
        uses: docker/build-push-action@v5
        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
        with:
          context: .
          file: ./docker/lerobot-cpu/Dockerfile
@@ -59,21 +78,24 @@ jobs:
          git lfs install

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
        with:
          cache-binary: false

      - name: Check out code
        uses: actions/checkout@v4
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          lfs: true
          persist-credentials: false

      - name: Login to DockerHub
        uses: docker/login-action@v3
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}

      - name: Build and Push GPU
        uses: docker/build-push-action@v5
        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
        with:
          context: .
          file: ./docker/lerobot-gpu/Dockerfile
@@ -88,19 +110,23 @@ jobs:
      group: aws-general-8-plus
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
        with:
          cache-binary: false

      - name: Check out code
        uses: actions/checkout@v4
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - name: Login to DockerHub
        uses: docker/login-action@v3
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}

      - name: Build and Push GPU dev
        uses: docker/build-push-action@v5
        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
        with:
          context: .
          file: ./docker/lerobot-gpu-dev/Dockerfile

.github/workflows/build_documentation.yml (vendored, new file): 23 lines
@@ -0,0 +1,23 @@
name: Build documentation

on:
  workflow_dispatch:
  push:
    paths:
      - "docs/**"
    branches:
      - main
      - doc-builder*
      - v*-release


jobs:
  build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
    uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
    with:
      commit_sha: ${{ github.sha }}
      package: lerobot
      additional_args: --not_python_module
    secrets:
      token: ${{ secrets.HUGGINGFACE_PUSH }}
      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}

.github/workflows/build_pr_documentation.yml (vendored, new file): 19 lines
@@ -0,0 +1,19 @@
name: Build PR Documentation

on:
  pull_request:
    paths:
      - "docs/**"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
    uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
    with:
      commit_sha: ${{ github.event.pull_request.head.sha }}
      pr_number: ${{ github.event.number }}
      package: lerobot
      additional_args: --not_python_module

.github/workflows/nightly-tests.yml (vendored): 28 lines changed
@@ -1,3 +1,17 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/nightly.yml
name: Nightly
@@ -7,10 +21,10 @@ on:
  schedule:
    - cron: "0 2 * * *"

env:
  DATA_DIR: tests/data
  # SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
permissions: {}

# env:
#   SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
jobs:
  run_all_tests_cpu:
    name: CPU
@@ -19,7 +33,7 @@ jobs:
    runs-on:
      group: aws-general-8-plus
    container:
      image: huggingface/lerobot-cpu:latest
      image: huggingface/lerobot-cpu:latest # zizmor: ignore[unpinned-images]
      options: --shm-size "16gb"
      credentials:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -30,13 +44,9 @@ jobs:
        working-directory: /lerobot
    steps:
      - name: Tests
        env:
          DATA_DIR: tests/data
        run: pytest -v --cov=./lerobot --disable-warnings tests

      - name: Tests end-to-end
        env:
          DATA_DIR: tests/data
        run: make test-end-to-end


@@ -50,7 +60,7 @@ jobs:
      CUDA_VISIBLE_DEVICES: "0"
      TEST_TYPE: "single_gpu"
    container:
      image: huggingface/lerobot-gpu:latest
      image: huggingface/lerobot-gpu:latest # zizmor: ignore[unpinned-images]
      options: --gpus all --shm-size "16gb"
      credentials:
        username: ${{ secrets.DOCKERHUB_USERNAME }}

.github/workflows/quality.yml (vendored): 76 lines changed
@@ -1,15 +1,29 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: Quality

on:
  workflow_dispatch:
  workflow_call:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main

permissions: {}

env:
  PYTHON_VERSION: "3.10"

@@ -19,10 +33,12 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v3
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - name: Set up Python
        uses: actions/setup-python@v4
        uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4.9.1
        with:
          python-version: ${{ env.PYTHON_VERSION }}

@@ -30,55 +46,27 @@ jobs:
        id: get-ruff-version
        run: |
          RUFF_VERSION=$(awk '/repo: https:\/\/github.com\/astral-sh\/ruff-pre-commit/{flag=1;next}/rev:/{if(flag){print $2;exit}}' .pre-commit-config.yaml)
          echo "RUFF_VERSION=${RUFF_VERSION}" >> $GITHUB_ENV
          echo "ruff_version=${RUFF_VERSION}" >> $GITHUB_OUTPUT

      - name: Install Ruff
        run: python -m pip install "ruff==${{ env.RUFF_VERSION }}"
        env:
          RUFF_VERSION: ${{ steps.get-ruff-version.outputs.ruff_version }}
        run: python -m pip install "ruff==${RUFF_VERSION}"

      - name: Ruff check
        run: ruff check
        run: ruff check --output-format=github

      - name: Ruff format
        run: ruff format --diff


  poetry_check:
    name: Poetry check
  typos:
    name: Typos
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v3
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - name: Install poetry
        run: pipx install poetry

      - name: Poetry check
        run: poetry check


  poetry_relax:
    name: Poetry relax
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v3

      - name: Install poetry
        run: pipx install poetry

      - name: Install poetry-relax
        run: poetry self add poetry-relax

      - name: Poetry relax
        id: poetry_relax
        run: |
          output=$(poetry relax --check 2>&1)
          if echo "$output" | grep -q "Proposing updates"; then
            echo "$output"
            echo ""
            echo "Some dependencies have caret '^' version requirement added by poetry by default."
            echo "Please replace them with '>='. You can do this by hand or use poetry-relax to do this."
            exit 1
          else
            echo "$output"
          fi
      - name: typos-action
        uses: crate-ci/typos@db35ee91e80fbb447f33b0e5fbddb24d2a1a884f # v1.29.10

.github/workflows/test-docker-build.yml (vendored): 41 lines changed
@@ -1,15 +1,29 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/test-docker-build.yml
name: Test Dockerfiles

on:
  pull_request:
    branches:
      - main
    paths:
      # Run only when DockerFile files are modified
      - "docker/**"

permissions: {}

env:
  PYTHON_VERSION: "3.10"

@@ -21,43 +35,46 @@ jobs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42
        with:
          files: docker/**
          json: "true"

      - name: Run step if only the files listed above change
      - name: Run step if only the files listed above change # zizmor: ignore[template-injection]
        if: steps.changed-files.outputs.any_changed == 'true'
        id: set-matrix
        env:
          ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
        run: |
          echo "matrix=${{ steps.changed-files.outputs.all_changed_files}}" >> $GITHUB_OUTPUT


  build_modified_dockerfiles:
    name: Build modified Docker images
    needs: get_changed_files
    runs-on:
      group: aws-general-8-plus
    if: ${{ needs.get_changed_files.outputs.matrix }} != ''
    if: needs.get_changed_files.outputs.matrix != ''
    strategy:
      fail-fast: false
      matrix:
        docker-file: ${{ fromJson(needs.get_changed_files.outputs.matrix) }}
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
        with:
          cache-binary: false

      - name: Check out code
        uses: actions/checkout@v4
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          persist-credentials: false

      - name: Build Docker image
        uses: docker/build-push-action@v5
        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
        with:
          file: ${{ matrix.docker-file }}
          context: .

.github/workflows/test.yml (vendored): 102 lines changed
@@ -1,15 +1,28 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: Tests

on:
  pull_request:
    branches:
      - main
    paths:
      - "lerobot/**"
      - "tests/**"
      - "examples/**"
      - ".github/**"
      - "poetry.lock"
      - "pyproject.toml"
      - ".pre-commit-config.yaml"
      - "Makefile"
      - ".cache/**"
  push:
@@ -20,21 +33,27 @@ on:
      - "tests/**"
      - "examples/**"
      - ".github/**"
      - "poetry.lock"
      - "pyproject.toml"
      - ".pre-commit-config.yaml"
      - "Makefile"
      - ".cache/**"

permissions: {}

env:
  UV_VERSION: "0.6.0"

jobs:
  pytest:
    name: Pytest
    runs-on: ubuntu-latest
    env:
      DATA_DIR: tests/data
      MUJOCO_GL: egl
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          lfs: true # Ensure LFS files are pulled
          persist-credentials: false

      - name: Install apt dependencies
        # portaudio19-dev is needed to install pyaudio
@@ -42,25 +61,19 @@ jobs:
          sudo apt-get update && \
          sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev

      - name: Install poetry
        run: |
          pipx install poetry && poetry config virtualenvs.in-project true
          echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH

      # TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
      - name: Install uv and python
        uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
        with:
          enable-cache: true
          version: ${{ env.UV_VERSION }}
          python-version: "3.10"
          cache: "poetry"

      - name: Install poetry dependencies
        run: |
          poetry install --all-extras
      - name: Install lerobot (all extras)
        run: uv sync --all-extras

      - name: Test with pytest
        run: |
          pytest tests -v --cov=./lerobot --durations=0 \
          uv run pytest tests -v --cov=./lerobot --durations=0 \
            -W ignore::DeprecationWarning:imageio_ffmpeg._utils:7 \
            -W ignore::UserWarning:torch.utils.data.dataloader:558 \
            -W ignore::UserWarning:gymnasium.utils.env_checker:247 \
@@ -70,71 +83,66 @@ jobs:
    name: Pytest (minimal install)
    runs-on: ubuntu-latest
    env:
      DATA_DIR: tests/data
      MUJOCO_GL: egl
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          lfs: true # Ensure LFS files are pulled
          persist-credentials: false

      - name: Install apt dependencies
        run: sudo apt-get update && sudo apt-get install -y ffmpeg

      - name: Install poetry
        run: |
          pipx install poetry && poetry config virtualenvs.in-project true
          echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH

      # TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
      - name: Install uv and python
        uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
        with:
          enable-cache: true
          version: ${{ env.UV_VERSION }}
          python-version: "3.10"

      - name: Install poetry dependencies
        run: |
          poetry install --extras "test"
      - name: Install lerobot
        run: uv sync --extra "test"

      - name: Test with pytest
        run: |
          pytest tests -v --cov=./lerobot --durations=0 \
          uv run pytest tests -v --cov=./lerobot --durations=0 \
            -W ignore::DeprecationWarning:imageio_ffmpeg._utils:7 \
            -W ignore::UserWarning:torch.utils.data.dataloader:558 \
            -W ignore::UserWarning:gymnasium.utils.env_checker:247 \
            && rm -rf tests/outputs outputs


  end-to-end:
    name: End-to-end
    runs-on: ubuntu-latest
    env:
      DATA_DIR: tests/data
      MUJOCO_GL: egl
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          lfs: true # Ensure LFS files are pulled
          persist-credentials: false

      - name: Install apt dependencies
        # portaudio19-dev is needed to install pyaudio
        run: |
          sudo apt-get update && \
          sudo apt-get install -y libegl1-mesa-dev portaudio19-dev
          sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev

      - name: Install poetry
        run: |
          pipx install poetry && poetry config virtualenvs.in-project true
          echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH

      - name: Set up Python 3.10
        uses: actions/setup-python@v5
      - name: Install uv and python
        uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
        with:
          enable-cache: true
          version: ${{ env.UV_VERSION }}
          python-version: "3.10"
          cache: "poetry"

      - name: Install poetry dependencies
      - name: Install lerobot (all extras)
        run: |
          poetry install --all-extras
          uv venv
          uv sync --all-extras

      - name: venv
        run: |
          echo "PYTHON_PATH=${{ github.workspace }}/.venv/bin/python" >> $GITHUB_ENV

      - name: Test end-to-end
        run: |

.github/workflows/trufflehog.yml (vendored): 23 lines changed
@@ -1,20 +1,35 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

on:
  push:

name: Secret Leaks

permissions:
  contents: read
permissions: {}

jobs:
  trufflehog:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
          persist-credentials: false

      - name: Secret Scanning
        uses: trufflesecurity/trufflehog@main
        uses: trufflesecurity/trufflehog@90694bf9af66e7536abc5824e7a87246dbf933cb # v3.88.35
        with:
          extra_args: --only-verified

.github/workflows/upload_pr_documentation.yml (vendored, new file): 16 lines
@@ -0,0 +1,16 @@
name: Upload PR Documentation

on: # zizmor: ignore[dangerous-triggers] We follow the same pattern as in Transformers
  workflow_run:
    workflows: [ "Build PR Documentation" ]
    types:
      - completed

jobs:
  build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
    uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
    with:
      package_name: lerobot
    secrets:
      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
      comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}

.gitignore (vendored): 29 lines changed
@@ -1,3 +1,20 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Dev scripts
.dev

# Logging
logs
tmp
@@ -12,6 +29,7 @@ outputs

# VS Code
.vscode
.devcontainer

# HPC
nautilus/*.yaml
@@ -49,6 +67,10 @@ share/python-wheels/
*.egg
MANIFEST

# uv/poetry lock files
poetry.lock
uv.lock

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
@@ -60,7 +82,7 @@ pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
!tests/data
!tests/artifacts
htmlcov/
.tox/
.nox/
@@ -73,10 +95,8 @@ coverage.xml
.hypothesis/
.pytest_cache/

# Ignore .cache except calibration
# Ignore .cache
.cache/*
!.cache/calibration/
!.cache/calibration/**

# Translations
*.mo
@@ -125,6 +145,7 @@ celerybeat.pid
# Environments
.env
.venv
env/
venv/
env.bak/
venv.bak/

.pre-commit-config.yaml
@@ -1,9 +1,31 @@
exclude: ^(tests/data)
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exclude: "tests/artifacts/.*\\.safetensors$"
default_language_version:
  python: python3.10
repos:
  ##### Meta #####
  - repo: meta
    hooks:
      - id: check-useless-excludes
      - id: check-hooks-apply


  ##### Style / Misc. #####
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    rev: v5.0.0
    hooks:
      - id: check-added-large-files
      - id: debug-statements
@@ -13,25 +35,39 @@ repos:
      - id: check-toml
      - id: end-of-file-fixer
      - id: trailing-whitespace

  - repo: https://github.com/adhtruong/mirrors-typos
    rev: v1.32.0
    hooks:
      - id: typos
        args: [--force-exclude]

  - repo: https://github.com/asottile/pyupgrade
    rev: v3.16.0
    rev: v3.20.0
    hooks:
      - id: pyupgrade
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.5.2
    rev: v0.11.11
    hooks:
      - id: ruff
        args: [--fix]
      - id: ruff-format
  - repo: https://github.com/python-poetry/poetry
    rev: 1.8.0
    hooks:
      - id: poetry-check
      - id: poetry-lock
        args:
          - "--check"
          - "--no-update"


  ##### Security #####
  - repo: https://github.com/gitleaks/gitleaks
    rev: v8.18.4
    rev: v8.26.0
    hooks:
      - id: gitleaks

  - repo: https://github.com/woodruffw/zizmor-pre-commit
    rev: v1.8.0
    hooks:
      - id: zizmor

  - repo: https://github.com/PyCQA/bandit
    rev: 1.8.3
    hooks:
      - id: bandit
        args: ["-c", "pyproject.toml"]
        additional_dependencies: ["bandit[toml]"]

CONTRIBUTING.md
@@ -129,38 +129,71 @@ Follow these steps to start contributing:

   🚨 **Do not** work on the `main` branch.

4. for development, we use `poetry` instead of just `pip` to easily track our dependencies.
   If you don't have it already, follow the [instructions](https://python-poetry.org/docs/#installation) to install it.
4. for development, we advise to use a tool like `poetry` or `uv` instead of just `pip` to easily track our dependencies.
   Follow the instructions to [install poetry](https://python-poetry.org/docs/#installation) (use a version >=2.1.0) or to [install uv](https://docs.astral.sh/uv/getting-started/installation/#installation-methods) if you don't have one of them already.

   Set up a development environment with conda or miniconda:
   ```bash
   conda create -y -n lerobot-dev python=3.10 && conda activate lerobot-dev
   ```

   To develop on 🤗 LeRobot, you will at least need to install the `dev` and `test` extras dependencies along with the core library:
   If you're using `uv`, it can manage python versions so you can instead do:
   ```bash
   poetry install --sync --extras "dev test"
   uv venv --python 3.10 && source .venv/bin/activate
   ```

   To develop on 🤗 LeRobot, you will at least need to install the `dev` and `test` extras dependencies along with the core library:

   using `poetry`
   ```bash
   poetry sync --extras "dev test"
   ```

   using `uv`
   ```bash
   uv sync --extra dev --extra test
   ```

   You can also install the project with all its dependencies (including environments):

   using `poetry`
   ```bash
   poetry install --sync --all-extras
   poetry sync --all-extras
   ```

   using `uv`
   ```bash
   uv sync --all-extras
   ```

   > **Note:** If you don't install simulation environments with `--all-extras`, the tests that require them will be skipped when running the pytest suite locally. However, they *will* be tested in the CI. In general, we advise you to install everything and test locally before pushing.

   Whichever command you chose to install the project (e.g. `poetry install --sync --all-extras`), you should run it again when pulling code with an updated version of `pyproject.toml` and `poetry.lock` in order to synchronize your virtual environment with the new dependencies.
   Whichever command you chose to install the project (e.g. `poetry sync --all-extras`), you should run it again when pulling code with an updated version of `pyproject.toml` and `poetry.lock` in order to synchronize your virtual environment with the new dependencies.

   The equivalent of `pip install some-package`, would just be:

   using `poetry`
   ```bash
   poetry add some-package
   ```

   When making changes to the poetry sections of the `pyproject.toml`, you should run the following command to lock dependencies.
   using `uv`
   ```bash
   poetry lock --no-update
   uv add some-package
   ```

   When making changes to the poetry sections of the `pyproject.toml`, you should run the following command to lock dependencies.
   using `poetry`
   ```bash
   poetry lock
   ```

   using `uv`
   ```bash
   uv lock
   ```


5. Develop the features on your branch.

   As you work on the features, you should make sure that the test suite
@@ -195,7 +228,7 @@ Follow these steps to start contributing:
   git commit
   ```

   Note, if you already commited some changes that have a wrong formatting, you can use:
   Note, if you already committed some changes that have a wrong formatting, you can use:
   ```bash
   pre-commit run --all-files
   ```
@@ -236,9 +269,6 @@ Follow these steps to start contributing:
   the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate
   it from PRs ready to be merged;
4. Make sure existing tests pass;
<!-- 5. Add high-coverage tests. No quality testing = no merge.

See an example of a good PR here: https://github.com/huggingface/lerobot/pull/ -->

### Tests

@@ -258,7 +288,7 @@ sudo apt-get install git-lfs
git lfs install
```

Pull artifacts if they're not in [tests/data](tests/data)
Pull artifacts if they're not in [tests/artifacts](tests/artifacts)
```bash
git lfs pull
```
@@ -267,7 +297,7 @@ We use `pytest` in order to run the tests. From the root of the
repository, here's how to run tests with `pytest` for the library:

```bash
DATA_DIR="tests/data" python -m pytest -sv ./tests
python -m pytest -sv ./tests
```

250
Makefile
@@ -1,11 +1,25 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
.PHONY: tests
|
||||
|
||||
PYTHON_PATH := $(shell which python)
|
||||
|
||||
# If Poetry is installed, redefine PYTHON_PATH to use the Poetry-managed Python
|
||||
POETRY_CHECK := $(shell command -v poetry)
|
||||
ifneq ($(POETRY_CHECK),)
|
||||
PYTHON_PATH := $(shell poetry run which python)
|
||||
# If uv is installed and a virtual environment exists, use it
|
||||
UV_CHECK := $(shell command -v uv)
|
||||
ifneq ($(UV_CHECK),)
|
||||
PYTHON_PATH := $(shell .venv/bin/python)
|
||||
endif
|
||||
|
||||
export PATH := $(dir $(PYTHON_PATH)):$(PATH)
|
||||
@@ -20,171 +34,109 @@ build-gpu:
|
||||
|
||||
test-end-to-end:
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-ete-train
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-ete-train-resume
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-ete-eval
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-ete-train-amp
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-ete-eval-amp
|
||||
${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-train
|
||||
${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-eval
|
||||
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train
|
||||
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train-with-online
|
||||
${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval
|
||||
${MAKE} DEVICE=$(DEVICE) test-default-ete-eval
|
||||
${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial
|
||||
|
||||
test-act-ete-train:
|
||||
python lerobot/scripts/train.py \
|
||||
policy=act \
|
||||
policy.dim_model=64 \
|
||||
env=aloha \
|
||||
wandb.enable=False \
|
||||
training.offline_steps=2 \
|
||||
training.online_steps=0 \
|
||||
eval.n_episodes=1 \
|
||||
eval.batch_size=1 \
|
||||
device=$(DEVICE) \
|
||||
training.save_checkpoint=true \
|
||||
training.save_freq=2 \
|
||||
policy.n_action_steps=20 \
|
||||
policy.chunk_size=20 \
|
||||
training.batch_size=2 \
|
||||
training.image_transforms.enable=true \
|
||||
hydra.run.dir=tests/outputs/act/
|
||||
--policy.type=act \
|
||||
--policy.dim_model=64 \
|
||||
--policy.n_action_steps=20 \
|
||||
--policy.chunk_size=20 \
|
||||
--policy.device=$(DEVICE) \
|
||||
--env.type=aloha \
|
||||
--env.episode_length=5 \
|
||||
--dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
|
||||
--dataset.image_transforms.enable=true \
|
||||
--dataset.episodes="[0]" \
|
||||
--batch_size=2 \
|
||||
--steps=4 \
|
||||
--eval_freq=2 \
|
||||
--eval.n_episodes=1 \
|
||||
--eval.batch_size=1 \
|
||||
--save_freq=2 \
|
||||
--save_checkpoint=true \
|
||||
--log_freq=1 \
|
||||
--wandb.enable=false \
|
||||
--output_dir=tests/outputs/act/
|
||||

test-act-ete-train-resume:
	python lerobot/scripts/train.py \
		--config_path=tests/outputs/act/checkpoints/000002/pretrained_model/train_config.json \
		--resume=true

test-act-ete-eval:
	python lerobot/scripts/eval.py \
		-p tests/outputs/act/checkpoints/000002/pretrained_model \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		env.episode_length=8 \
		device=$(DEVICE) \

test-act-ete-train-amp:
	python lerobot/scripts/train.py \
		policy=act \
		policy.dim_model=64 \
		env=aloha \
		wandb.enable=False \
		training.offline_steps=2 \
		training.online_steps=0 \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		device=$(DEVICE) \
		training.save_checkpoint=true \
		training.save_freq=2 \
		policy.n_action_steps=20 \
		policy.chunk_size=20 \
		training.batch_size=2 \
		hydra.run.dir=tests/outputs/act_amp/ \
		training.image_transforms.enable=true \
		use_amp=true

test-act-ete-eval-amp:
	python lerobot/scripts/eval.py \
		-p tests/outputs/act_amp/checkpoints/000002/pretrained_model \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		env.episode_length=8 \
		device=$(DEVICE) \
		use_amp=true
		--policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \
		--policy.device=$(DEVICE) \
		--env.type=aloha \
		--env.episode_length=5 \
		--eval.n_episodes=1 \
		--eval.batch_size=1

test-diffusion-ete-train:
	python lerobot/scripts/train.py \
		policy=diffusion \
		policy.down_dims=\[64,128,256\] \
		policy.diffusion_step_embed_dim=32 \
		policy.num_inference_steps=10 \
		env=pusht \
		wandb.enable=False \
		training.offline_steps=2 \
		training.online_steps=0 \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		device=$(DEVICE) \
		training.save_checkpoint=true \
		training.save_freq=2 \
		training.batch_size=2 \
		training.image_transforms.enable=true \
		hydra.run.dir=tests/outputs/diffusion/
		--policy.type=diffusion \
		--policy.down_dims='[64,128,256]' \
		--policy.diffusion_step_embed_dim=32 \
		--policy.num_inference_steps=10 \
		--policy.device=$(DEVICE) \
		--env.type=pusht \
		--env.episode_length=5 \
		--dataset.repo_id=lerobot/pusht \
		--dataset.image_transforms.enable=true \
		--dataset.episodes="[0]" \
		--batch_size=2 \
		--steps=2 \
		--eval_freq=2 \
		--eval.n_episodes=1 \
		--eval.batch_size=1 \
		--save_checkpoint=true \
		--save_freq=2 \
		--log_freq=1 \
		--wandb.enable=false \
		--output_dir=tests/outputs/diffusion/

test-diffusion-ete-eval:
	python lerobot/scripts/eval.py \
		-p tests/outputs/diffusion/checkpoints/000002/pretrained_model \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		env.episode_length=8 \
		device=$(DEVICE) \
		--policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \
		--policy.device=$(DEVICE) \
		--env.type=pusht \
		--env.episode_length=5 \
		--eval.n_episodes=1 \
		--eval.batch_size=1

test-tdmpc-ete-train:
	python lerobot/scripts/train.py \
		policy=tdmpc \
		env=xarm \
		env.task=XarmLift-v0 \
		dataset_repo_id=lerobot/xarm_lift_medium \
		wandb.enable=False \
		training.offline_steps=2 \
		training.online_steps=0 \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		env.episode_length=2 \
		device=$(DEVICE) \
		training.save_checkpoint=true \
		training.save_freq=2 \
		training.batch_size=2 \
		training.image_transforms.enable=true \
		hydra.run.dir=tests/outputs/tdmpc/

test-tdmpc-ete-train-with-online:
	python lerobot/scripts/train.py \
		env=pusht \
		env.gym.obs_type=environment_state_agent_pos \
		policy=tdmpc_pusht_keypoints \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		env.episode_length=10 \
		device=$(DEVICE) \
		training.offline_steps=2 \
		training.online_steps=20 \
		training.save_checkpoint=false \
		training.save_freq=10 \
		training.batch_size=2 \
		training.online_rollout_n_episodes=2 \
		training.online_rollout_batch_size=2 \
		training.online_steps_between_rollouts=10 \
		training.online_buffer_capacity=15 \
		eval.use_async_envs=true \
		hydra.run.dir=tests/outputs/tdmpc_online/

		--policy.type=tdmpc \
		--policy.device=$(DEVICE) \
		--env.type=xarm \
		--env.task=XarmLift-v0 \
		--env.episode_length=5 \
		--dataset.repo_id=lerobot/xarm_lift_medium \
		--dataset.image_transforms.enable=true \
		--dataset.episodes="[0]" \
		--batch_size=2 \
		--steps=2 \
		--eval_freq=2 \
		--eval.n_episodes=1 \
		--eval.batch_size=1 \
		--save_checkpoint=true \
		--save_freq=2 \
		--log_freq=1 \
		--wandb.enable=false \
		--output_dir=tests/outputs/tdmpc/

test-tdmpc-ete-eval:
	python lerobot/scripts/eval.py \
		-p tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		env.episode_length=8 \
		device=$(DEVICE) \

test-default-ete-eval:
	python lerobot/scripts/eval.py \
		--config lerobot/configs/default.yaml \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		env.episode_length=8 \
		device=$(DEVICE) \

test-act-pusht-tutorial:
	cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/created_by_Makefile.yaml
	python lerobot/scripts/train.py \
		policy=created_by_Makefile.yaml \
		env=pusht \
		wandb.enable=False \
		training.offline_steps=2 \
		eval.n_episodes=1 \
		eval.batch_size=1 \
		env.episode_length=2 \
		device=$(DEVICE) \
		training.save_model=true \
		training.save_freq=2 \
		training.batch_size=2 \
		training.image_transforms.enable=true \
		hydra.run.dir=tests/outputs/act_pusht/
	rm lerobot/configs/policy/created_by_Makefile.yaml
		--policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
		--policy.device=$(DEVICE) \
		--env.type=xarm \
		--env.episode_length=5 \
		--env.task=XarmLift-v0 \
		--eval.n_episodes=1 \
		--eval.batch_size=1

README.md (173 changed lines)
@@ -23,15 +23,38 @@
</div>

<h2 align="center">
    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">New robot in town: SO-100</a></p>
    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/12_use_so101.md">
    Build Your Own SO-101 Robot!</a></p>
</h2>

<div align="center">
    <img src="media/so100/leader_follower.webp?raw=true" alt="SO-100 leader and follower arms" title="SO-100 leader and follower arms" width="50%">
    <p>We just added a new tutorial on how to build a more affordable robot, at the price of $110 per arm!</p>
    <p>Teach it new skills by showing it a few moves with just a laptop.</p>
    <p>Then watch your homemade robot act autonomously 🤯</p>
    <p>Follow the link to the <a href="https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md">full tutorial for SO-100</a>.</p>
    <div style="display: flex; gap: 1rem; justify-content: center; align-items: center;" >
        <img
            src="media/so101/so101.webp?raw=true"
            alt="SO-101 follower arm"
            title="SO-101 follower arm"
            style="width: 40%;"
        />
        <img
            src="media/so101/so101-leader.webp?raw=true"
            alt="SO-101 leader arm"
            title="SO-101 leader arm"
            style="width: 40%;"
        />
    </div>

    <p><strong>Meet the updated SO100, the SO-101 – Just €114 per arm!</strong></p>
    <p>Train it in minutes with a few simple moves on your laptop.</p>
    <p>Then sit back and watch your creation act autonomously! 🤯</p>

    <p><a href="https://github.com/huggingface/lerobot/blob/main/examples/12_use_so101.md">
    See the full SO-101 tutorial here.</a></p>

    <p>Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!</p>
    <p>Check out the <a href="https://github.com/huggingface/lerobot/blob/main/examples/11_use_lekiwi.md">LeKiwi tutorial</a> and bring your robot to life on wheels.</p>

    <img src="media/lekiwi/kiwi.webp?raw=true" alt="LeKiwi mobile robot" title="LeKiwi mobile robot" width="50%">
</div>

<br/>
@@ -42,7 +65,6 @@

---

🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.

🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning.

@@ -55,9 +77,9 @@

<table>
    <tr>
        <td><img src="http://remicadene.com/assets/gif/aloha_act.gif" width="100%" alt="ACT policy on ALOHA env"/></td>
        <td><img src="http://remicadene.com/assets/gif/simxarm_tdmpc.gif" width="100%" alt="TDMPC policy on SimXArm env"/></td>
        <td><img src="http://remicadene.com/assets/gif/pusht_diffusion.gif" width="100%" alt="Diffusion policy on PushT env"/></td>
        <td><img src="media/gym/aloha_act.gif" width="100%" alt="ACT policy on ALOHA env"/></td>
        <td><img src="media/gym/simxarm_tdmpc.gif" width="100%" alt="TDMPC policy on SimXArm env"/></td>
        <td><img src="media/gym/pusht_diffusion.gif" width="100%" alt="Diffusion policy on PushT env"/></td>
    </tr>
    <tr>
        <td align="center">ACT policy on ALOHA env</td>

@@ -68,7 +90,7 @@

### Acknowledgment

- Thanks to Tony Zaho, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io).
- Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io).
- Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, Pusht environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io).
- Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM).
- Thanks to Antonio Loquercio and Ashish Kumar for their early support.
@@ -89,14 +111,25 @@ conda create -y -n lerobot python=3.10
conda activate lerobot
```

When using `miniconda`, install `ffmpeg` in your environment:
```bash
conda install ffmpeg -c conda-forge
```

> **NOTE:** This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
> ```bash
> conda install ffmpeg=7.1.1 -c conda-forge
> ```
> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.

Install 🤗 LeRobot:
```bash
pip install -e .
```

> **NOTE:** Depending on your platform, If you encounter any build errors during this step
you may need to install `cmake` and `build-essential` for building some of our dependencies.
On linux: `sudo apt-get install cmake build-essential`
> **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run:
`sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)

For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras:
- [aloha](https://github.com/huggingface/gym-aloha)
@@ -122,10 +155,7 @@ wandb login
├── examples             # contains demonstration examples, start here to learn about LeRobot
|   └── advanced         # contains even more examples for those who have mastered the basics
├── lerobot
|   ├── configs          # contains hydra yaml files with all options that you can override in the command line
|   |   ├── default.yaml   # selected by default, it loads pusht environment and diffusion policy
|   |   ├── env            # various sim environments and their datasets: aloha.yaml, pusht.yaml, xarm.yaml
|   |   └── policy         # various policies: act.yaml, diffusion.yaml, tdmpc.yaml
|   ├── configs          # contains config classes with all options that you can override in the command line
|   ├── common           # contains classes and utilities
|   |   ├── datasets       # various datasets of human demonstrations: aloha, pusht, xarm
|   |   ├── envs           # various sim environments: aloha, pusht, xarm
@@ -144,7 +174,7 @@ wandb login

### Visualize datasets

Check out [example 1](./examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically download data from the Hugging Face hub.
Check out [example 1](./examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub.

You can also locally visualize episodes from a dataset on the hub by executing our script from the command line:
```bash
@@ -153,10 +183,12 @@ python lerobot/scripts/visualize_dataset.py \
    --episode-index 0
```

or from a dataset in a local folder with the root `DATA_DIR` environment variable (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`)
or from a dataset in a local folder with the `root` option and the `--local-files-only` flag (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`)
```bash
DATA_DIR='./my_local_data_dir' python lerobot/scripts/visualize_dataset.py \
python lerobot/scripts/visualize_dataset.py \
    --repo-id lerobot/pusht \
    --root ./my_local_data_dir \
    --local-files-only 1 \
    --episode-index 0
```
@@ -189,7 +221,7 @@ dataset attributes:
│   ├ episode_index (int64): index of the episode for this sample
│   ├ frame_index (int64): index of the frame for this sample in the episode ; starts at 0 for each episode
│   ├ timestamp (float32): timestamp in the episode
│   ├ next.done (bool): indicates the end of en episode ; True for the last frame in each episode
│   ├ next.done (bool): indicates the end of an episode ; True for the last frame in each episode
│   └ index (int64): general index in the whole dataset
├ episode_data_index: contains 2 tensors with the start and end indices of each episode
│   ├ from (1D int64 tensor): first frame index for each episode — shape (num episodes,) starts with 0
@@ -208,12 +240,10 @@ dataset attributes:

A `LeRobotDataset` is serialised using several widespread file formats for each of its parts, namely:
- hf_dataset stored using Hugging Face datasets library serialization to parquet
- videos are stored in mp4 format to save space or png files
- episode_data_index saved using `safetensor` tensor serialization format
- stats saved using `safetensor` tensor serialization format
- info are saved using JSON
- videos are stored in mp4 format to save space
- metadata are stored in plain json/jsonl files

Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can set the `DATA_DIR` environment variable to your root dataset folder as illustrated in the above section on dataset visualization.
Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location.
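As a minimal sketch of both access paths (the class name matches the one used by the benchmark script later in this diff; the import path and the `root` keyword are assumptions that may vary between versions):

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Pull a dataset from the hub (cached under ~/.cache/huggingface/lerobot)
dataset = LeRobotDataset("lerobot/pusht")

# Or point at a local copy that lives outside the default cache
local_dataset = LeRobotDataset("lerobot/pusht", root="./my_local_data_dir")

frame = dataset[0]
print(frame.keys())  # observation/action tensors plus the index fields listed above
```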

### Evaluate a pretrained policy

@@ -222,15 +252,18 @@ Check out [example 2](./examples/2_evaluate_pretrained_policy.py) that illustrat
We also provide a more capable script to parallelize the evaluation over multiple environments during the same rollout. Here is an example with a pretrained model hosted on [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht):
```bash
python lerobot/scripts/eval.py \
    -p lerobot/diffusion_pusht \
    eval.n_episodes=10 \
    eval.batch_size=10
    --policy.path=lerobot/diffusion_pusht \
    --env.type=pusht \
    --eval.batch_size=10 \
    --eval.n_episodes=10 \
    --policy.use_amp=false \
    --policy.device=cuda
```

Note: After training your own policy, you can re-evaluate the checkpoints with:

```bash
python lerobot/scripts/eval.py -p {OUTPUT_DIR}/checkpoints/last/pretrained_model
python lerobot/scripts/eval.py --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model
```

See `python lerobot/scripts/eval.py --help` for more instructions.
@@ -239,70 +272,28 @@ See `python lerobot/scripts/eval.py --help` for more instructions.

Check out [example 3](./examples/3_train_policy.py) that illustrates how to train a model using our core library in python, and [example 4](./examples/4_train_policy_with_script.md) that shows how to use our training script from command line.

In general, you can use our training script to easily train any policy. Here is an example of training the ACT policy on trajectories collected by humans on the Aloha simulation environment for the insertion task:
To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding `--wandb.enable=true`.

```bash
python lerobot/scripts/train.py \
    policy=act \
    env=aloha \
    env.task=AlohaInsertion-v0 \
    dataset_repo_id=lerobot/aloha_sim_insertion_human \
```

The experiment directory is automatically generated and will show up in yellow in your terminal. It looks like `outputs/train/2024-05-05/20-21-12_aloha_act_default`. You can manually specify an experiment directory by adding this argument to the `train.py` python command:
```bash
hydra.run.dir=your/new/experiment/dir
```

In the experiment directory there will be a folder called `checkpoints` which will have the following structure:

```bash
checkpoints
├── 000250  # checkpoint_dir for training step 250
│   ├── pretrained_model  # Hugging Face pretrained model dir
│   │   ├── config.json  # Hugging Face pretrained model config
│   │   ├── config.yaml  # consolidated Hydra config
│   │   ├── model.safetensors  # model weights
│   │   └── README.md  # Hugging Face model card
│   └── training_state.pth  # optimizer/scheduler/rng state and training step
```

To resume training from a checkpoint, you can add these to the `train.py` python command:
```bash
hydra.run.dir=your/original/experiment/dir resume=true
```

It will load the pretrained model, optimizer and scheduler states for training. For more information please see our tutorial on training resumption [here](https://github.com/huggingface/lerobot/blob/main/examples/5_resume_training.md).

To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding:

```bash
wandb.enable=true
```

A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explaination of some commonly used metrics in logs.
A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](./examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs.

Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python lerobot/scripts/eval.py --help` for more instructions.
Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python lerobot/scripts/eval.py --help` for more instructions.

#### Reproduce state-of-the-art (SOTA)

We have organized our configuration files (found under [`lerobot/configs`](./lerobot/configs)) such that they reproduce SOTA results from a given model variant in their respective original works. Simply running:

We provide some pretrained policies on our [hub page](https://huggingface.co/lerobot) that can achieve state-of-the-art performances.
You can reproduce their training by loading the config from their run. Simply running:
```bash
python lerobot/scripts/train.py policy=diffusion env=pusht
python lerobot/scripts/train.py --config_path=lerobot/diffusion_pusht
```

reproduces SOTA results for Diffusion Policy on the PushT task.

Pretrained policies, along with reproduction details, can be found under the "Models" section of https://huggingface.co/lerobot.

## Contribute

If you would like to contribute to 🤗 LeRobot, please check out our [contribution guide](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md).

### Add a new dataset
<!-- ### Add a new dataset

To add a dataset to the hub, you need to login using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
@@ -320,7 +311,7 @@ python lerobot/scripts/push_dataset_to_hub.py \

See `python lerobot/scripts/push_dataset_to_hub.py --help` for more instructions.

If your dataset format is not supported, implement your own in `lerobot/common/datasets/push_dataset_to_hub/${raw_format}_format.py` by copying examples like [pusht_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py), [umi_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py), [aloha_hdf5](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py), or [xarm_pkl](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py).
If your dataset format is not supported, implement your own in `lerobot/common/datasets/push_dataset_to_hub/${raw_format}_format.py` by copying examples like [pusht_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py), [umi_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py), [aloha_hdf5](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py), or [xarm_pkl](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py). -->

### Add a pretrained policy

@@ -330,7 +321,7 @@ Once you have trained a policy you may upload it to the Hugging Face hub using a

You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain:
- `config.json`: A serialized version of the policy configuration (following the policy's dataclass config).
- `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format.
- `config.yaml`: A consolidated Hydra training configuration containing the policy, environment, and dataset configs. The policy configuration should match `config.json` exactly. The environment config is useful for anyone who wants to evaluate your policy. The dataset config just serves as a paper trail for reproducibility.
- `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility.

To upload these to the hub, run the following:
```bash
@@ -369,7 +360,7 @@ with profile(
If you want, you can cite this work with:
```bibtex
@misc{cadene2024lerobot,
    author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Wolf, Thomas},
    author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Palma, Steven and Kooijmans, Pepijn and Aractingi, Michel and Shukor, Mustafa and Aubakirova, Dana and Russi, Martino and Capuano, Francesco and Pascale, Caroline and Choghari, Jade and Moss, Jess and Wolf, Thomas},
    title = {LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch},
    howpublished = "\url{https://github.com/huggingface/lerobot}",
    year = {2024}
@@ -417,3 +408,19 @@ Additionally, if you are using any of the particular policy architecture, pretra
    year={2024}
}
```

- [HIL-SERL](https://hil-serl.github.io/)
```bibtex
@Article{luo2024hilserl,
    title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning},
    author={Jianlan Luo and Charles Xu and Jeffrey Wu and Sergey Levine},
    year={2024},
    eprint={2410.21845},
    archivePrefix={arXiv},
    primaryClass={cs.RO}
}
```

## Star History

[![Star History Chart](https://api.star-history.com/svg?repos=huggingface/lerobot&type=Timeline)](https://star-history.com/#huggingface/lerobot&Timeline)
@@ -21,7 +21,7 @@ How to decode videos?

## Variables
**Image content & size**
We don't expect the same optimal settings for a dataset of images from a simulation, or from real-world in an appartment, or in a factory, or outdoor, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
We don't expect the same optimal settings for a dataset of images from a simulation, or from real-world in an apartment, or in a factory, or outdoor, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
For these reasons, we run this benchmark on four representative datasets:
- `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.

@@ -51,7 +51,7 @@ For a comprehensive list and documentation of these parameters, see the ffmpeg d
### Decoding parameters
**Decoder**
We tested two video decoding backends from torchvision:
- `pyav` (default)
- `pyav`
- `video_reader` (requires to build torchvision from source)

**Requested timestamps**
@@ -63,7 +63,7 @@ This of course is affected by the `-g` parameter during encoding, which specifie

Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`.

Additionally, because some policies might request single timestamps that are a few frames appart, we also have the following scenario:
Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario:
- `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g `[t, t + 5 / fps]`),

However, due to how video decoding is implemented with `pyav`, we don't have access to an accurate seek so in practice this scenario is essentially the same as `6_frames` since all 6 frames between `t` and `t + 5 / fps` will be decoded.
@@ -85,8 +85,8 @@ However, due to how video decoding is implemented with `pyav`, we don't have acc
**Average Structural Similarity Index Measure (higher is better)**
`avg_ssim` evaluates the perceived quality of images by comparing luminance, contrast, and structure. SSIM values range from -1 to 1, where 1 indicates perfect similarity.
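For reference, here is a minimal sketch of how such an average SSIM could be computed with scikit-image (an illustration under assumptions, not the benchmark's actual implementation; frames are assumed to be HxWxC uint8 numpy arrays):

```python
import numpy as np
from skimage.metrics import structural_similarity


def avg_ssim(originals: list[np.ndarray], decoded: list[np.ndarray]) -> float:
    """Mean SSIM over pairs of original/decoded frames."""
    scores = [
        structural_similarity(ref, out, channel_axis=2)
        for ref, out in zip(originals, decoded)
    ]
    return float(np.mean(scores))
```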

One aspect that can't be measured here with those metrics is the compatibility of the encoding accross platforms, in particular on web browser, for visualization purposes.
h264, h265 and AV1 are all commonly used codecs and should not be pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility:
One aspect that can't be measured here with those metrics is the compatibility of the encoding across platforms, in particular on web browser, for visualization purposes.
h264, h265 and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility:
- `yuv420p` is more widely supported across various platforms, including web browsers.
- `yuv444p` offers higher color fidelity but might not be supported as broadly.

@@ -114,9 +114,9 @@ We tried to measure the most impactful parameters for both encoding and decoding

Additional encoding parameters exist that are not included in this benchmark. In particular:
- `-preset` which allows for selecting encoding presets. This represents a collection of options that will provide a certain encoding speed to compression ratio. By leaving this parameter unspecified, it is considered to be `medium` for libx264 and libx265 and `8` for libsvtav1.
- `-tune` which allows to optimize the encoding for certains aspects (e.g. film quality, fast decoding, etc.).
- `-tune` which allows to optimize the encoding for certain aspects (e.g. film quality, fast decoding, etc.).

See the documentation mentioned above for more detailled info on these settings and for a more comprehensive list of other parameters.
See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.

Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few:
- `torchaudio`

@@ -17,12 +17,21 @@

import argparse
import datetime as dt
import os
import time
from pathlib import Path

import cv2
import rerun as rr

# see https://rerun.io/docs/howto/visualization/limit-ram
RERUN_MEMORY_LIMIT = os.getenv("LEROBOT_RERUN_MEMORY_LIMIT", "5%")


def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int):
def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int, duration: int):
    rr.init("lerobot_capture_camera_feed")
    rr.spawn(memory_limit=RERUN_MEMORY_LIMIT)

    now = dt.datetime.now()
    capture_dir = output_dir / f"{now:%Y-%m-%d}" / f"{now:%H-%M-%S}"
    if not capture_dir.exists():
@@ -39,24 +48,21 @@ def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    frame_index = 0
    while True:
    start_time = time.time()
    while time.time() - start_time < duration:
        ret, frame = cap.read()

        if not ret:
            print("Error: Could not read frame.")
            break

        cv2.imshow("Video Stream", frame)
        # frame from cv2 is already a numpy array, so it can be logged directly
        rr.log("video/stream", rr.Image(frame), static=True)
        cv2.imwrite(str(capture_dir / f"frame_{frame_index:06d}.png"), frame)
        frame_index += 1

        # Break the loop on 'q' key press
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Release the capture and destroy all windows
    # Release the capture
    cap.release()
    cv2.destroyAllWindows()

    # TODO(Steven): Add a graceful shutdown via a close() method for the Viewer context, though not currently supported in the Rerun API.


if __name__ == "__main__":
@@ -86,5 +92,11 @@ if __name__ == "__main__":
        default=720,
        help="Height of the captured images.",
    )
    parser.add_argument(
        "--duration",
        type=int,
        default=20,
        help="Duration in seconds for which the video stream should be captured.",
    )
    args = parser.parse_args()
    display_and_save_video_stream(**vars(args))
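A hypothetical invocation after this change (the script path is assumed for illustration; it is not shown in this diff):

```bash
# Capture ~20 seconds of frames at 1280x720; remaining flags rely on the
# parser defaults shown above
python lerobot/scripts/capture_camera_feed.py --width 1280 --height 720 --duration 20
```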

@@ -67,7 +67,7 @@ def parse_int_or_none(value) -> int | None:
def check_datasets_formats(repo_ids: list) -> None:
    for repo_id in repo_ids:
        dataset = LeRobotDataset(repo_id)
        if dataset.video:
        if len(dataset.meta.video_keys) > 0:
            raise ValueError(
                f"Use only image dataset for running this benchmark. Video dataset provided: {repo_id}"
            )
@@ -266,7 +266,7 @@ def benchmark_encoding_decoding(
    )

    ep_num_images = dataset.episode_data_index["to"][0].item()
    width, height = tuple(dataset[0][dataset.camera_keys[0]].shape[-2:])
    width, height = tuple(dataset[0][dataset.meta.camera_keys[0]].shape[-2:])
    num_pixels = width * height
    video_size_bytes = video_path.stat().st_size
    images_size_bytes = get_directory_size(imgs_dir)
@@ -416,7 +416,7 @@ if __name__ == "__main__":
        "--vcodec",
        type=str,
        nargs="*",
        default=["libx264", "libx265", "libsvtav1"],
        default=["libx264", "hevc", "libsvtav1"],
        help="Video codecs to be tested",
    )
    parser.add_argument(
@@ -446,7 +446,7 @@ if __name__ == "__main__":
    #     nargs="*",
    #     default=[0, 1],
    #     help="Use the fastdecode tuning option. 0 disables it. "
    #     "For libx264 and libx265, only 1 is possible. "
    #     "For libx264 and libx265/hevc, only 1 is possible. "
    #     "For libsvtav1, 1, 2 or 3 are possible values with a higher number meaning a faster decoding optimization",
    # )
    parser.add_argument(

@@ -1,32 +1,29 @@
# Configure image
ARG PYTHON_VERSION=3.10

FROM python:${PYTHON_VERSION}-slim

# Configure environment variables
ARG PYTHON_VERSION
ARG DEBIAN_FRONTEND=noninteractive

# Install apt dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
    speech-dispatcher \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Create virtual environment
RUN ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python
RUN python -m venv /opt/venv
ENV DEBIAN_FRONTEND=noninteractive
ENV MUJOCO_GL="egl"
ENV PATH="/opt/venv/bin:$PATH"
RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc

# Install LeRobot
# Install dependencies and set up Python in a single layer
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake git \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
    speech-dispatcher libgeos-dev \
    && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
    && python -m venv /opt/venv \
    && apt-get clean && rm -rf /var/lib/apt/lists/* \
    && echo "source /opt/venv/bin/activate" >> /root/.bashrc

# Clone repository and install LeRobot in a single layer
COPY . /lerobot
WORKDIR /lerobot
RUN pip install --upgrade --no-cache-dir pip
RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
    --extra-index-url https://download.pytorch.org/whl/cpu

# Set EGL as the rendering backend for MuJoCo
ENV MUJOCO_GL="egl"
RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
    && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \
    --extra-index-url https://download.pytorch.org/whl/cpu

# Execute in bash shell rather than python
CMD ["/bin/bash"]
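To sanity-check this image locally, a build/run pair like the following should work (the Dockerfile path is an assumption based on the repository layout):

```bash
# Build the CPU image from the repo root, then open a shell in it
docker build -t lerobot-cpu -f docker/lerobot-cpu/Dockerfile .
docker run -it --rm lerobot-cpu
```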

@@ -13,8 +13,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    sed gawk grep curl wget zip unzip \
    tcpdump sysstat screen tmux \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa \
    speech-dispatcher \
    python${PYTHON_VERSION} python${PYTHON_VERSION}-venv \
    speech-dispatcher portaudio19-dev libgeos-dev \
    python${PYTHON_VERSION} python${PYTHON_VERSION}-venv python${PYTHON_VERSION}-dev \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Install ffmpeg build dependencies. See:

@@ -1,30 +1,24 @@
FROM nvidia/cuda:12.4.1-base-ubuntu22.04

# Configure image
# Configure environment variables
ARG PYTHON_VERSION=3.10
ARG DEBIAN_FRONTEND=noninteractive

# Install apt dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
    speech-dispatcher \
    python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Create virtual environment
RUN ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python
RUN python -m venv /opt/venv
ENV DEBIAN_FRONTEND=noninteractive
ENV MUJOCO_GL="egl"
ENV PATH="/opt/venv/bin:$PATH"
RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc

# Install LeRobot
# Install dependencies and set up Python in a single layer
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake git \
    libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
    speech-dispatcher libgeos-dev \
    python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
    && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
    && python -m venv /opt/venv \
    && apt-get clean && rm -rf /var/lib/apt/lists/* \
    && echo "source /opt/venv/bin/activate" >> /root/.bashrc

# Clone repository and install LeRobot in a single layer
COPY . /lerobot
WORKDIR /lerobot
RUN pip install --upgrade --no-cache-dir pip
RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]"

# Set EGL as the rendering backend for MuJoCo
ENV MUJOCO_GL="egl"
RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
    && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]"
docs/README.md (new file, 137 lines)
@@ -0,0 +1,137 @@
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Generating the documentation

To generate the documentation, you first have to build it. Several packages are necessary to build the doc;
you can install them with the following command, at the root of the code repository:

```bash
pip install -e ".[docs]"
```

You will also need `nodejs`. Please refer to their [installation page](https://nodejs.org/en/download).

---
**NOTE**

You only need to generate the documentation to inspect it locally (if you're planning changes and want to
check how they look before committing for instance). You don't have to `git commit` the built documentation.

---

## Building the documentation

Once you have set up the `doc-builder` and additional packages, you can generate the documentation by
typing the following command:

```bash
doc-builder build lerobot docs/source/ --build_dir ~/tmp/test-build
```

You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
Markdown editor.

## Previewing the documentation

To preview the docs, first install the `watchdog` module with:

```bash
pip install watchdog
```

Then run the following command:

```bash
doc-builder preview lerobot docs/source/
```

The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives.

---
**NOTE**

The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart the `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).

---

## Adding a new element to the navigation bar

Accepted files are Markdown (.md).

Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/lerobot/blob/main/docs/source/_toctree.yml) file.

## Renaming section headers and moving sections

It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media, and it'd make for a far better user experience if users reading those months later could still easily navigate to the originally intended information.

Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.

So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:

```
Sections that were moved:

[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```
and of course, if you moved it to another file, then:

```
Sections that were moved:

[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```

Use the relative style to link to the new file so that the versioned docs continue to work.

For an example of a rich moved-sections set, please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).

### Adding a new tutorial

Adding a new tutorial or section is done in two steps:

- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md).
- Link that file in `./source/_toctree.yml` on the correct toc-tree.

Make sure to put your new file under the proper section. If you have a doubt, feel free to ask in a Github Issue or PR.
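For instance, registering a hypothetical `my_new_tutorial.md` added under `./source` would mean extending `_toctree.yml` with an entry like this (mirroring the structure of the `_toctree.yml` file added later in this diff):

```yaml
- sections:
    - local: my_new_tutorial
      title: My New Tutorial
  title: "Tutorials"
```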

### Writing source documentation

Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names
and objects like True, None or any strings should usually be put in `code`.

#### Writing a multi-line code block

Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:

````
```
# first line of code
# second line
# etc
```
````

#### Adding an image

Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
If you are an external contributor, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
to this dataset.
docs/source/_toctree.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
- sections:
    - local: index
      title: LeRobot
    - local: installation
      title: Installation
  title: Get started
- sections:
    - local: getting_started_real_world_robot
      title: Getting Started with Real-World Robots
    - local: cameras
      title: Cameras
    - local: hilserl
      title: Getting Started with Reinforcement Learning
  title: "Tutorials"
- sections:
    - local: so101
      title: SO-101
    - local: so100
      title: SO-100
    - local: koch
      title: Koch v1.1
    - local: lekiwi
      title: LeKiwi
  title: "Robots"
- sections:
    - local: contributing
      title: Contribute to LeRobot
  title: "Contribute"
docs/source/cameras.mdx (new file, 173 lines)
@@ -0,0 +1,173 @@
# Cameras

LeRobot offers multiple options for video capture, including phone cameras, built-in laptop cameras, external webcams, and Intel RealSense cameras. To efficiently record frames from most cameras, you can use either the `OpenCVCamera` or `RealSenseCamera` class. For additional compatibility details on the `OpenCVCamera` class, refer to the [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).

### Finding your camera

To instantiate a camera, you need a camera identifier. This identifier might change if you reboot your computer or re-plug your camera, a behavior mostly dependent on your operating system.

To find the camera indices of the cameras plugged into your system, run the following script:
```bash
python lerobot/find_cameras.py opencv # or realsense for Intel Realsense cameras
```

The output will look something like this if you have two cameras connected:
```
--- Detected Cameras ---
Camera #0:
  Name: OpenCV Camera @ 0
  Type: OpenCV
  Id: 0
  Backend api: AVFOUNDATION
  Default stream profile:
    Format: 16.0
    Width: 1920
    Height: 1080
    Fps: 15.0
--------------------
(more cameras ...)
```

> [!WARNING]
> When using Intel RealSense cameras in `macOS`, you could get this [error](https://github.com/IntelRealSense/librealsense/issues/12307): `Error finding RealSense cameras: failed to set power state`, which can be solved by running the same command with `sudo` permissions. Note that using RealSense cameras in `macOS` is unstable.

## Use Cameras

Below are two examples demonstrating how to work with the API:

- **Asynchronous frame capture** using an OpenCV-based camera
- **Color and depth capture** using an Intel RealSense camera

<hfoptions id="shell_restart">
<hfoption id="Open CV Camera">

```python
from lerobot.common.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.common.cameras.opencv.camera_opencv import OpenCVCamera
from lerobot.common.cameras.configs import ColorMode, Cv2Rotation

# Construct an `OpenCVCameraConfig` with your desired FPS, resolution, color mode, and rotation.
config = OpenCVCameraConfig(
    index_or_path=0,
    fps=15,
    width=1920,
    height=1080,
    color_mode=ColorMode.RGB,
    rotation=Cv2Rotation.NO_ROTATION
)

# Instantiate and connect an `OpenCVCamera`, performing a warm-up read (default).
camera = OpenCVCamera(config)
camera.connect()

# Read frames asynchronously in a loop via `async_read(timeout_ms)`
try:
    for i in range(10):
        frame = camera.async_read(timeout_ms=200)
        print(f"Async frame {i} shape:", frame.shape)
finally:
    camera.disconnect()
```

</hfoption>
<hfoption id="Intel Realsense Camera">

```python
from lerobot.common.cameras.intel.configuration_realsense import RealSenseCameraConfig
from lerobot.common.cameras.intel.camera_realsense import RealSenseCamera
from lerobot.common.cameras.configs import ColorMode, Cv2Rotation

# Create a `RealSenseCameraConfig` specifying your camera's serial number and enabling depth.
config = RealSenseCameraConfig(
    serial_number="233522074606",
    fps=15,
    width=640,
    height=480,
    color_mode=ColorMode.RGB,
    use_depth=True,
    rotation=Cv2Rotation.NO_ROTATION
)

# Instantiate and connect a `RealSenseCamera` with warm-up read (default).
camera = RealSenseCamera(config)
camera.connect()

# Capture a color frame via `read()` and a depth map via `read_depth()`.
try:
    color_frame = camera.read()
    depth_map = camera.read_depth()
    print("Color frame shape:", color_frame.shape)
    print("Depth map shape:", depth_map.shape)
finally:
    camera.disconnect()
```
</hfoption>
</hfoptions>
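To quickly sanity-check what either camera returns, you can write the captured arrays to disk with OpenCV. This is a sketch assuming the `color_frame` and `depth_map` variables from the RealSense example above; note that OpenCV expects BGR channel order when writing:

```python
import cv2

# color_frame is RGB (per ColorMode.RGB above), so convert to BGR for imwrite
cv2.imwrite("color.png", cv2.cvtColor(color_frame, cv2.COLOR_RGB2BGR))

# Scale the depth map to 8-bit so it can be viewed as a grayscale image
# (assumes a non-empty depth map with at least one nonzero value)
depth_u8 = (depth_map / depth_map.max() * 255).astype("uint8")
cv2.imwrite("depth.png", depth_u8)
```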

## Use your phone
<hfoptions id="use phone">
<hfoption id="Mac">

To use your iPhone as a camera on macOS, enable the Continuity Camera feature:
- Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later.
- Sign in both devices with the same Apple ID.
- Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection.

For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac).

Your iPhone should be detected automatically when running the camera setup script in the next section.

</hfoption>
<hfoption id="Linux">

If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera:

1. *Install `v4l2loopback-dkms` and `v4l-utils`*. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using:
```bash
sudo apt install v4l2loopback-dkms v4l-utils
```
2. *Install [DroidCam](https://droidcam.app) on your phone*. This app is available for both iOS and Android.
3. *Install [OBS Studio](https://obsproject.com)*. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org):
```bash
flatpak install flathub com.obsproject.Studio
```
4. *Install the DroidCam OBS plugin*. This plugin integrates DroidCam with OBS Studio. Install it with:
```bash
flatpak install flathub com.obsproject.Studio.Plugin.DroidCam
```
5. *Start OBS Studio*. Launch with:
```bash
flatpak run com.obsproject.Studio
```
6. *Add your phone as a source*. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`.
7. *Adjust resolution settings*. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in.
8. *Start virtual camera*. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide).
9. *Verify the virtual camera setup*. Use `v4l2-ctl` to list the devices:
```bash
v4l2-ctl --list-devices
```
You should see an entry like:
```
VirtualCam (platform:v4l2loopback-000):
    /dev/video1
```
10. *Check the camera resolution*. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`.
```bash
v4l2-ctl -d /dev/video1 --get-fmt-video
```
You should see an entry like:
```
>>> Format Video Capture:
>>> Width/Height : 640/480
>>> Pixel Format : 'YUYV' (YUYV 4:2:2)
```

Troubleshooting: If the resolution is not correct, you will have to delete the virtual camera port and try again, as it cannot be changed once created.

If everything is set up correctly, you can proceed with the rest of the tutorial.

</hfoption>
</hfoptions>
docs/source/contributing.md (new symbolic link)
@@ -0,0 +1 @@
../../CONTRIBUTING.md
docs/source/getting_started_real_world_robot.mdx (new file, 321 lines)
@@ -0,0 +1,321 @@
|
||||
# Getting Started with Real-World Robots
|
||||
|
||||
This tutorial will explain how to train a neural network to control a real robot autonomously.
|
||||
|
||||
**You'll learn:**
|
||||
1. How to record and visualize your dataset.
|
||||
2. How to train a policy using your data and prepare it for evaluation.
|
||||
3. How to evaluate your policy and visualize the results.
|
||||
|
||||
By following these steps, you'll be able to replicate tasks such as picking up a Lego block and placing it in a bin with a high success rate, as shown in the video below.
|
||||
|
||||
<details>
|
||||
<summary><strong>Video: pickup lego block task</strong></summary>
|
||||
|
||||
<div class="video-container">
|
||||
<video controls width="600">
|
||||
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/lerobot_task.mp4" type="video/mp4" />
|
||||
</video>
|
||||
</div>
|
||||
|
||||
</details>
|
||||
|
||||
This tutorial isn’t tied to a specific robot: we walk you through the commands and API snippets you can adapt for any supported platform.
|
||||
|
||||
During data collection, you'll use a teleoperation device, such as a leader arm or a keyboard, to teleoperate the robot and record its motion trajectories.
|
||||
|
||||
Once you’ve gathered enough trajectories, you’ll train a neural network to imitate these trajectories and deploy the trained model so your robot can perform the task autonomously.
|
||||
|
||||
If you run into any issues at any point, jump into our [Discord community](https://discord.com/invite/s3KuuzsPFb) for support.
|
||||
|
||||
## Set up and Calibrate
|
||||
|
||||
If you haven't yet set up and calibrated your robot and teleop device, please do so by following the robot-specific tutorial.
|
||||
|
||||
## Teleoperate
|
||||
|
||||
In this example, we’ll demonstrate how to teleoperate the SO101 robot. For each command, we also provide a corresponding API example.
|
||||
|
||||
<hfoptions id="teleoperate_so101">
|
||||
<hfoption id="Command">
|
||||
```bash
|
||||
python -m lerobot.teleoperate \
|
||||
--robot.type=so101_follower \
|
||||
--robot.port=/dev/tty.usbmodem58760431541 \
|
||||
--robot.id=my_red_robot_arm \
|
||||
--teleop.type=so101_leader \
|
||||
--teleop.port=/dev/tty.usbmodem58760431551 \
|
||||
--teleop.id=my_blue_leader_arm
|
||||
```
|
||||
</hfoption>
|
||||
<hfoption id="API example">
|
||||
```python
|
||||
from lerobot.common.teleoperators.so101_leader import SO101LeaderConfig, SO101Leader
|
||||
from lerobot.common.robots.so101_follower import SO101FollowerConfig, SO101Follower
|
||||
|
||||
robot_config = SO101FollowerConfig(
|
||||
port="/dev/tty.usbmodem58760431541",
|
||||
id="my_red_robot_arm",
|
||||
)
|
||||
|
||||
teleop_config = SO101LeaderConfig(
|
||||
port="/dev/tty.usbmodem58760431551",
|
||||
id="my_blue_leader_arm",
|
||||
)
|
||||
|
||||
robot = SO101Follower(robot_config)
|
||||
teleop_device = SO101Leader(teleop_config)
|
||||
robot.connect()
|
||||
teleop_device.connect()
|
||||
|
||||
while True:
|
||||
action = teleop_device.get_action()
|
||||
robot.send_action(action)
|
||||
```
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
The teleoperate command will automatically:
|
||||
1. Identify any missing calibrations and initiate the calibration procedure.
|
||||
2. Connect the robot and teleop device and start teleoperation.
|
||||
|
||||
## Cameras
|
||||
|
||||
To add cameras to your setup, follow this [Guide](./cameras#setup-cameras).
|
||||
|
||||
## Teleoperate with cameras
|
||||
|
||||
With `rerun`, you can teleoperate again while simultaneously visualizing the camera feeds and joint positions. In this example, we’re using the Koch arm.
|
||||
|
||||
<hfoptions id="teleoperate_koch_camera">
|
||||
<hfoption id="Command">
|
||||
```bash
|
||||
python -m lerobot.teleoperate \
|
||||
--robot.type=koch_follower \
|
||||
--robot.port=/dev/tty.usbmodem58760431541 \
|
||||
--robot.id=my_koch_robot \
|
||||
--robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
|
||||
--teleop.type=koch_leader \
|
||||
--teleop.port=/dev/tty.usbmodem58760431551 \
|
||||
--teleop.id=my_koch_teleop \
|
||||
--display_data=true
|
||||
```
|
||||
</hfoption>
|
||||
<hfoption id="API example">
|
||||
```python
|
||||
from lerobot.common.cameras.opencv.configuration_opencv import OpenCVCameraConfig
|
||||
from lerobot.common.teleoperators.koch_leader import KochLeaderConfig, KochLeader
|
||||
from lerobot.common.robots.koch_follower import KochFollowerConfig, KochFollower
|
||||
|
||||
camera_config = {
|
||||
"front": OpenCVCameraConfig(index_or_path=0, width=1920, height=1080, fps=30)
|
||||
}
|
||||
|
||||
robot_config = KochFollowerConfig(
|
||||
port="/dev/tty.usbmodem585A0076841",
|
||||
id="my_red_robot_arm",
|
||||
cameras=camera_config
|
||||
)
|
||||
|
||||
teleop_config = KochLeaderConfig(
|
||||
port="/dev/tty.usbmodem58760431551",
|
||||
id="my_blue_leader_arm",
|
||||
)
|
||||
|
||||
robot = KochFollower(robot_config)
|
||||
teleop_device = KochLeader(teleop_config)
|
||||
robot.connect()
|
||||
teleop_device.connect()
|
||||
|
||||
while True:
|
||||
observation = robot.get_observation()
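    # `observation` holds the camera frames and joint states; this minimal loop
    # does not display them (pass `--display_data=true` to the CLI command for that)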
|
||||
action = teleop_device.get_action()
|
||||
robot.send_action(action)
|
||||
```
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Record a dataset
|
||||
|
||||
Once you're familiar with teleoperation, you can record your first dataset.
|
||||
|
||||
We use the Hugging Face Hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can log in via the CLI using a write-access token; this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens).
|
||||
|
||||
Add your token to the CLI by running this command:
|
||||
```bash
|
||||
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
||||
```
|
||||
|
||||
Then store your Hugging Face repository name in a variable:
|
||||
```bash
|
||||
HF_USER=$(huggingface-cli whoami | head -n 1)
|
||||
echo $HF_USER
|
||||
```
|
||||
|
||||
Now you can record a dataset. To record 2 episodes and upload your dataset to the hub, execute this command, tailored here to the SO101:
|
||||
```bash
|
||||
python -m lerobot.record \
|
||||
--robot.type=so101_follower \
|
||||
--robot.port=/dev/tty.usbmodem585A0076841 \
|
||||
--robot.id=my_red_robot_arm \
|
||||
--robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
|
||||
--teleop.type=so101_leader \
|
||||
--teleop.port=/dev/tty.usbmodem58760431551 \
|
||||
--teleop.id=my_blue_leader_arm \
|
||||
--display_data=true \
|
||||
--dataset.repo_id=aliberts/record-test \
|
||||
--dataset.num_episodes=2 \
|
||||
--dataset.single_task="Grab the black cube"
|
||||
```
|
||||
|
||||
#### Dataset upload
|
||||
Locally, your dataset is stored in this folder: `~/.cache/huggingface/lerobot/{repo-id}`. At the end of data recording, your dataset will be uploaded to your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/so101_test), whose URL you can obtain by running:
|
||||
```bash
|
||||
echo https://huggingface.co/datasets/${HF_USER}/so101_test
|
||||
```
|
||||
Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (for example, `tutorial`).
|
||||
|
||||
You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot).
|
||||
|
||||
#### Record function
|
||||
|
||||
The `record` function provides a suite of tools for capturing and managing data during robot operation:
|
||||
|
||||
##### 1. Data Storage
|
||||
- Data is stored in the `LeRobotDataset` format and written to disk during recording (see the loading example below).
|
||||
- By default, the dataset is pushed to your Hugging Face page after recording.
|
||||
- To disable uploading, use `--dataset.push_to_hub=False`.
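As a quick check, you can load a recorded dataset back with the `LeRobotDataset` API. This is a minimal sketch; the repo id is illustrative and attribute names may differ slightly between versions:

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Loads from the local cache, or downloads from the Hub if missing
dataset = LeRobotDataset("username/so101_test")
print(dataset.num_episodes)
frame = dataset[0]  # a dict of tensors: camera images, robot state, action, ...
print(frame.keys())
```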
|
||||
|
||||
##### 2. Checkpointing and Resuming
|
||||
- Checkpoints are automatically created during recording.
|
||||
- If an issue occurs, you can resume by re-running the same command with `--control.resume=true`.
|
||||
- To start recording from scratch, **manually delete** the dataset directory.
|
||||
|
||||
##### 3. Recording Parameters
|
||||
Set the flow of data recording using command-line arguments:
|
||||
- `--dataset.episode_time_s=60`
|
||||
Duration of each data recording episode (default: **60 seconds**).
|
||||
- `--dataset.reset_time_s=60`
|
||||
Duration for resetting the environment after each episode (default: **60 seconds**).
|
||||
- `--dataset.num_episodes=50`
|
||||
Total number of episodes to record (default: **50**).
|
||||
|
||||
##### 4. Keyboard Controls During Recording
|
||||
Control the data recording flow using keyboard shortcuts:
|
||||
- Press **Right Arrow (`→`)**: Stop the current episode (or the reset phase) early and move on to the next.
|
||||
- Press **Left Arrow (`←`)**: Cancel the current episode and re-record it.
|
||||
- Press **Escape (`ESC`)**: Immediately stop the session, encode videos, and upload the dataset.
|
||||
|
||||
#### Tips for gathering data
|
||||
|
||||
Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible in the cameras. A good rule of thumb is that you should be able to do the task yourself by looking only at the camera images.
|
||||
|
||||
In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.
|
||||
|
||||
Avoid adding too much variation too quickly, as it may hinder your results.
|
||||
|
||||
If you want to dive deeper into this important topic, you can check out the [blog post](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset) we wrote on what makes a good dataset.
|
||||
|
||||
|
||||
#### Troubleshooting:
|
||||
- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
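If you want to verify that `pynput` can see your keyboard, a small listener like the following can help (a minimal sketch; run it from a terminal inside your graphical session):

```python
from pynput import keyboard

def on_press(key):
    print(f"pressed: {key}")
    if key == keyboard.Key.esc:
        return False  # returning False stops the listener

# If nothing prints when you press keys, check your $DISPLAY setup
with keyboard.Listener(on_press=on_press) as listener:
    listener.join()
```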
|
||||
|
||||
## Visualize a dataset
|
||||
|
||||
If you uploaded your dataset to the hub with `--dataset.push_to_hub=true` (the default), you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
|
||||
```bash
|
||||
echo ${HF_USER}/so101_test
|
||||
```
|
||||
|
||||
If you didn't upload your dataset (i.e., you used `--dataset.push_to_hub=False`), you can visualize it locally with the following command, which serves the visualization tool in your browser at `http://127.0.0.1:9090`:
|
||||
```bash
|
||||
python lerobot/scripts/visualize_dataset_html.py \
|
||||
--repo-id ${HF_USER}/so101_test \
|
||||
--local-files-only 1
|
||||
```
|
||||
|
||||
This will launch a local web server that looks like this:
|
||||
<div style="text-align:center;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/visualize_dataset_html.webp?raw=true" alt="Koch v1.1 leader and follower arms" title="Koch v1.1 leader and follower arms" width="100%"></img>
|
||||
</div>
|
||||
|
||||
## Replay an episode
|
||||
|
||||
A useful feature is the `replay` function, which allows you to replay any episode that you've recorded or episodes from any dataset out there. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model.
|
||||
|
||||
You can replay the first episode on your robot with:
|
||||
```bash
|
||||
python -m lerobot.replay \
|
||||
--robot.type=so101_follower \
|
||||
--robot.port=/dev/tty.usbmodem58760431541 \
|
||||
--robot.id=black \
|
||||
--dataset.repo_id=aliberts/record-test \
|
||||
--dataset.episode=2
|
||||
```
|
||||
|
||||
Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).
|
||||
|
||||
## Train a policy
|
||||
|
||||
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--dataset.repo_id=${HF_USER}/so101_test \
|
||||
--policy.type=act \
|
||||
--output_dir=outputs/train/act_so101_test \
|
||||
--job_name=act_so101_test \
|
||||
--policy.device=cuda \
|
||||
--wandb.enable=true
|
||||
```
|
||||
|
||||
Let's explain the command:
|
||||
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so101_test`.
|
||||
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
|
||||
3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
|
||||
4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
|
||||
|
||||
Training should take several hours. You will find checkpoints in `outputs/train/act_so101_test/checkpoints`.
|
||||
|
||||
To resume training from a checkpoint, here is an example command to resume from the `last` checkpoint of the `act_so101_test` run:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
--config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \
|
||||
--resume=true
|
||||
```
|
||||
|
||||
#### Upload policy checkpoints
|
||||
|
||||
Once training is done, upload the latest checkpoint with:
|
||||
```bash
|
||||
huggingface-cli upload ${HF_USER}/act_so101_test \
|
||||
outputs/train/act_so101_test/checkpoints/last/pretrained_model
|
||||
```
|
||||
|
||||
You can also upload intermediate checkpoints with:
|
||||
```bash
|
||||
CKPT=010000
|
||||
huggingface-cli upload ${HF_USER}/act_so101_test_${CKPT} \
|
||||
outputs/train/act_so101_test/checkpoints/${CKPT}/pretrained_model
|
||||
```
|
||||
|
||||
## Evaluate your policy
|
||||
|
||||
You can use the `record` script from [`lerobot/record.py`](https://github.com/huggingface/lerobot/blob/main/lerobot/record.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
|
||||
```bash
|
||||
python -m lerobot.record \
|
||||
--robot.type=so100_follower \
|
||||
--robot.port=/dev/ttyACM1 \
|
||||
--robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \
|
||||
--robot.id=blue_follower_arm \
|
||||
--teleop.type=so100_leader \
|
||||
--teleop.port=/dev/ttyACM0 \
|
||||
--teleop.id=red_leader_arm \
|
||||
--display_data=false \
|
||||
--dataset.repo_id=$HF_USER/eval_lego_${EPOCHREALTIME/[^0-9]/} \
|
||||
--dataset.single_task="Put lego brick into the transparent box" \
|
||||
--policy.path=${HF_USER}/act_johns_arm
|
||||
```
|
||||
|
||||
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
|
||||
1. There is an additional `--policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`).
|
||||
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`).
|
||||
512
docs/source/hilserl.mdx
Normal file
@@ -0,0 +1,512 @@
|
||||
# HilSerl Real Robot Training Workflow Guide
|
||||
|
||||
Human-in-the-Loop Sample-Efficient Reinforcement Learning (HIL-SERL) with LeRobot is a workflow for taking a policy from “zero” to real-world robot mastery in just a couple of hours.
|
||||
It combines three ingredients:
|
||||
1. **Offline demonstrations & reward classifier:** a handful of human-teleop episodes plus a vision-based success detector give the policy a shaped starting point.
|
||||
2. **On-robot actor / learner loop with human interventions:** a distributed SAC/RLPD learner updates the policy while an actor explores on the physical robot; the human can jump in at any time to correct dangerous or unproductive behaviour.
|
||||
3. **Safety & efficiency tools:** joint/EE bounds, impedance control, crop-ROI preprocessing and WandB monitoring keep the data useful and the hardware safe.
|
||||
|
||||
Together these elements let HIL-SERL reach near-perfect task success and faster cycle times than imitation-only baselines.
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/hilserl-main-figure.png" alt="HIL-SERL workflow" title="HIL-SERL workflow" width="100%"></img>
|
||||
</p>
|
||||
|
||||
<p align="center"><i>HIL-SERL workflow, Luo et al. 2024</i></p>
|
||||
|
||||
This guide provides step-by-step instructions for training a policy on a real robot using LeRobot's HilSerl implementation.
|
||||
|
||||
|
||||
# 1. Real Robot Training Workflow
|
||||
|
||||
## 1.1 Understanding Configuration
|
||||
|
||||
The training process begins with proper configuration for the HILSerl environment. The configuration class of interest is `HILSerlRobotEnvConfig` in `lerobot/common/envs/configs.py`, which is defined as:
|
||||
|
||||
```python
|
||||
class HILSerlRobotEnvConfig(EnvConfig):
|
||||
robot: Optional[RobotConfig] = None # Main robot agent (defined in `lerobot/common/robots`)
|
||||
teleop: Optional[TeleoperatorConfig] = None # Teleoperator agent, e.g., gamepad or leader arm, (defined in `lerobot/common/teleoperators`)
|
||||
wrapper: Optional[EnvTransformConfig] = None # Environment wrapper settings; check `lerobot/scripts/server/gym_manipulator.py`
|
||||
fps: int = 10 # Control frequency
|
||||
name: str = "real_robot" # Environment name
|
||||
    mode: Optional[str] = None # "record", "replay", or None (for training)
|
||||
repo_id: Optional[str] = None # LeRobot dataset repository ID
|
||||
dataset_root: Optional[str] = None # Local dataset root (optional)
|
||||
task: str = "" # Task identifier
|
||||
num_episodes: int = 10 # Number of episodes for recording
|
||||
episode: int = 0 # episode index for replay
|
||||
device: str = "cuda" # Compute device
|
||||
push_to_hub: bool = True # Whether to push the recorded datasets to Hub
|
||||
pretrained_policy_name_or_path: Optional[str] = None # For policy loading
|
||||
reward_classifier_pretrained_path: Optional[str] = None # For reward model
|
||||
```
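As an illustration, a record-mode configuration could be instantiated in Python like this (the field values are illustrative; the `robot`, `teleop`, and `wrapper` fields are omitted here and would need to be filled in for a real setup):

```python
from lerobot.common.envs.configs import HILSerlRobotEnvConfig

env_config = HILSerlRobotEnvConfig(
    fps=10,
    mode="record",                      # collect demonstrations
    repo_id="username/pick_lift_cube",  # dataset to create
    task="pick_and_lift",
    num_episodes=15,
    device="cuda",
    push_to_hub=True,
)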
|
||||
|
||||
|
||||
## 1.2 Finding Robot Workspace Bounds
|
||||
|
||||
Before collecting demonstrations, you need to determine the appropriate operational bounds for your robot.
|
||||
|
||||
This simplifies the problem of learning on the real robot by limiting the robot's operational space to a specific region that solves the task, avoiding unnecessary or unsafe exploration.
|
||||
|
||||
### 1.2.1 Using find_joint_limits.py
|
||||
|
||||
This script helps you find the safe operational bounds for your robot's end-effector. Given that you have a follower and leader arm, you can use the script to find the bounds for the follower arm that will be applied during training.
|
||||
Bounding the action space reduces the agent's redundant exploration and helps guarantee safety.
|
||||
|
||||
```bash
|
||||
python -m lerobot.scripts.find_joint_limits \
|
||||
--robot.type=so100_follower \
|
||||
--robot.port=/dev/tty.usbmodem58760431541 \
|
||||
--robot.id=black \
|
||||
--teleop.type=so100_leader \
|
||||
--teleop.port=/dev/tty.usbmodem58760431551 \
|
||||
--teleop.id=blue
|
||||
```
|
||||
|
||||
### 1.2.2 Workflow
|
||||
|
||||
1. Run the script and move the robot through the space that solves the task
|
||||
2. The script records the minimum and maximum end-effector positions and joint angles and prints them to the console, for example:
|
||||
```
|
||||
Max ee position [0.24170487 0.201285 0.10273342]
|
||||
Min ee position [0.16631757 -0.08237468 0.03364977]
|
||||
Max joint positions [50.0, 50.0, 50.0, 50.0, 50.0, 50.0]
|
||||
Min joint positions [-20.0, -20.0, -20.0, -20.0, -20.0, -20.0]
|
||||
```
|
||||
3. Use these values in the configuration of your teleoperation device (`TeleoperatorConfig`) under the `end_effector_bounds` field
|
||||
|
||||
### 1.2.3 Example Configuration
|
||||
|
||||
```json
|
||||
"end_effector_bounds": {
|
||||
"max": [0.24, 0.20, 0.10],
|
||||
"min": [0.16, -0.08, 0.03]
|
||||
}
|
||||
```
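Conceptually, these bounds are used to clip any commanded end-effector target to the safe workspace, along these lines (a simplified sketch, not the library's actual code):

```python
import numpy as np

def clamp_to_bounds(ee_target: np.ndarray, bounds: dict) -> np.ndarray:
    """Clip a commanded end-effector position (x, y, z) to the safe workspace."""
    return np.clip(ee_target, np.array(bounds["min"]), np.array(bounds["max"]))

bounds = {"max": [0.24, 0.20, 0.10], "min": [0.16, -0.08, 0.03]}
print(clamp_to_bounds(np.array([0.30, 0.00, 0.05]), bounds))  # -> [0.24 0.   0.05]
```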
|
||||
|
||||
## 1.3 Collecting Demonstrations
|
||||
|
||||
With the bounds defined, you can safely collect demonstrations for training. Training RL with an off-policy algorithm allows us to use offline datasets collected beforehand, improving the efficiency of the learning process.
|
||||
|
||||
### 1.3.1 Setting Up Record Mode
|
||||
|
||||
Create a configuration file for recording demonstrations (or edit an existing one like `env_config_so100.json`):
|
||||
|
||||
1. Set `mode` to `"record"`
|
||||
2. Specify a unique `repo_id` for your dataset (e.g., "username/task_name")
|
||||
3. Set `num_episodes` to the number of demonstrations you want to collect
|
||||
4. Set `crop_params_dict` to `null` initially (we'll determine crops later)
|
||||
5. Configure `robot`, `cameras`, and other hardware settings
|
||||
|
||||
Example configuration section:
|
||||
```json
|
||||
"mode": "record",
|
||||
"repo_id": "username/pick_lift_cube",
|
||||
"dataset_root": null,
|
||||
"task": "pick_and_lift",
|
||||
"num_episodes": 15,
|
||||
"episode": 0,
|
||||
"push_to_hub": true
|
||||
```
|
||||
|
||||
### 1.3.2 Gamepad Controls
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/gamepad_guide.jpg?raw=true" alt="Figure shows the control mappings on a Logitech gamepad." title="Gamepad Control Mapping" width="100%"></img>
|
||||
</p>
|
||||
<p align="center"><i>Gamepad button mapping for robot control and episode management</i></p>
|
||||
|
||||
|
||||
### 1.3.3 Recording Demonstrations
|
||||
|
||||
Start the recording process:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/rl/gym_manipulator.py --config_path lerobot/configs/env_config_so100.json
|
||||
```
|
||||
|
||||
During recording:
|
||||
1. The robot will reset to the initial position defined by `fixed_reset_position` in the configuration file
|
||||
2. Use the gamepad to control the robot (set `"control_mode": "gamepad"` in the configuration file)
|
||||
3. Complete the task successfully
|
||||
4. The episode ends with a reward of 1 when you press the "success" button
|
||||
5. If the time limit is reached, or the fail button is pressed, the episode ends with a reward of 0
|
||||
6. You can rerecord an episode by pressing the "rerecord" button
|
||||
7. The process automatically continues to the next episode
|
||||
8. After recording all episodes, the dataset is pushed to the Hugging Face Hub (optional) and saved locally
|
||||
|
||||
|
||||
|
||||
## 1.4 Processing the Dataset
|
||||
|
||||
After collecting demonstrations, process them to determine optimal camera crops.
|
||||
Reinforcement learning is sensitive to background distractions, so it is important to crop the images to the relevant workspace area.
|
||||
Note: If you already know the crop parameters, you can skip this step and just set the `crop_params_dict` in the configuration file during recording.
|
||||
|
||||
### 1.4.1 Determining Crop Parameters
|
||||
|
||||
Use the `crop_dataset_roi.py` script to interactively select regions of interest in your camera images:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/rl/crop_dataset_roi.py --repo-id username/pick_lift_cube
|
||||
```
|
||||
|
||||
1. For each camera view, the script will display the first frame
|
||||
2. Draw a rectangle around the relevant workspace area
|
||||
3. Press 'c' to confirm the selection
|
||||
4. Repeat for all camera views
|
||||
5. The script outputs cropping parameters and creates a new cropped dataset
|
||||
|
||||
Example output:
|
||||
```
|
||||
Selected Rectangular Regions of Interest (top, left, height, width):
|
||||
observation.images.side: [180, 207, 180, 200]
|
||||
observation.images.front: [180, 250, 120, 150]
|
||||
```
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/crop_dataset.gif" width="600"/>
|
||||
</p>
|
||||
|
||||
<p align="center"><i>Interactive cropping tool for selecting regions of interest</i></p>
|
||||
|
||||
|
||||
### 1.4.2 Updating Configuration
|
||||
|
||||
Add these crop parameters to your training configuration:
|
||||
|
||||
```json
|
||||
"crop_params_dict": {
|
||||
"observation.images.side": [180, 207, 180, 200],
|
||||
"observation.images.front": [180, 250, 120, 150]
|
||||
},
|
||||
"resize_size": [128, 128]
|
||||
```
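To see how these parameters are interpreted, here is a rough sketch of the crop-then-resize preprocessing using torchvision (illustrative only, not the exact pipeline LeRobot uses):

```python
import torch
from torchvision.transforms.functional import crop, resize

# (top, left, height, width), as produced by crop_dataset_roi.py
top, left, height, width = 180, 207, 180, 200

img = torch.rand(3, 480, 640)  # dummy camera frame (C, H, W)
img = resize(crop(img, top, left, height, width), [128, 128])
print(img.shape)  # torch.Size([3, 128, 128])
```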
|
||||
|
||||
## 1.5 Training with Actor-Learner
|
||||
|
||||
The LeRobot system uses a distributed actor-learner architecture for training. You will need to start two processes: a learner and an actor.
|
||||
|
||||
### 1.5.1 Configuration Setup
|
||||
|
||||
Create a training configuration file (See example `train_config_hilserl_so100.json`). The training config is based on the main `TrainPipelineConfig` class in `lerobot/configs/train.py`.
|
||||
|
||||
1. Set `mode` to `null` (for training mode)
|
||||
2. Configure the policy settings (`type`, `device`, etc.)
|
||||
3. Set `dataset` to your cropped dataset
|
||||
4. Configure environment settings with crop parameters
|
||||
5. Check the other parameters related to SAC.
|
||||
6. Verify that the `policy` config is correct with the right `input_features` and `output_features` for your task.
|
||||
|
||||
### 1.5.2 Starting the Learner
|
||||
|
||||
First, start the learner server process:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/rl/learner.py --config_path lerobot/configs/train_config_hilserl_so100.json
|
||||
```
|
||||
|
||||
The learner:
|
||||
- Initializes the policy network
|
||||
- Prepares replay buffers
|
||||
- Opens a gRPC server to communicate with actors
|
||||
- Processes transitions and updates the policy
|
||||
|
||||
### 1.5.3 Starting the Actor
|
||||
|
||||
In a separate terminal, start the actor process with the same configuration:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/rl/actor.py --config_path lerobot/configs/train_config_hilserl_so100.json
|
||||
```
|
||||
|
||||
The actor:
|
||||
- Connects to the learner via gRPC
|
||||
- Initializes the environment
|
||||
- Executes rollouts of the policy to collect experience
|
||||
- Sends transitions to the learner
|
||||
- Receives updated policy parameters
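In pseudocode, the actor's main loop looks roughly like this (a conceptual sketch only; the function and client names are hypothetical, not the actual `actor.py` API):

```python
def actor_loop(env, policy, learner_client):
    # Conceptual sketch of the actor side of the distributed loop
    obs, _ = env.reset()
    while True:
        action = policy.select_action(obs)  # may be overridden by a human intervention
        next_obs, reward, terminated, truncated, info = env.step(action)
        learner_client.send_transition(obs, action, reward, next_obs, terminated)
        policy = learner_client.maybe_update_params(policy)  # pull fresh weights
        obs = env.reset()[0] if (terminated or truncated) else next_obs
```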
|
||||
|
||||
### 1.5.4 Training Flow
|
||||
|
||||
The training proceeds automatically:
|
||||
|
||||
1. The actor executes the policy in the environment
|
||||
2. Transitions are collected and sent to the learner
|
||||
3. The learner updates the policy based on these transitions
|
||||
4. Updated policy parameters are sent back to the actor
|
||||
5. The process continues until the specified step limit is reached
|
||||
|
||||
### 1.5.5 Human in the Loop
|
||||
|
||||
- The key to learning efficiently is human intervention: providing corrective feedback and completing the task for the policy aids its learning and exploration.
|
||||
- To perform human interventions, you can press the upper right trigger button on the gamepad. This will pause the policy actions and allow you to take over.
|
||||
- A successful experiment is one where the human intervenes heavily at the start and then gradually reduces the interventions as the policy improves. You can monitor the intervention rate in the `wandb` dashboard.
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/hil_effect.png?raw=true" alt="Figure shows the control mappings on a Logitech gamepad." title="Gamepad Control Mapping" width="100%"></img>
|
||||
</p>
|
||||
|
||||
<p align="center"><i>Example showing how human interventions help guide policy learning over time</i></p>
|
||||
|
||||
- The figure plots the episodic reward over interaction steps and shows the effect of human interventions on policy learning.
|
||||
- The orange curve is an experiment without any human interventions, while the pink and blue curves are experiments with human interventions.
|
||||
- We can observe that the number of steps before the policy starts achieving the maximum reward is cut by a quarter when human interventions are present.
|
||||
|
||||
#### Guide to Human Interventions
|
||||
The strategy to follow is to intervene heavily at the start of training and then reduce the amount of interventions as the training progresses. Some tips and hints:
|
||||
- Intervene for almost the entire length of the episode during the first few episodes.
|
||||
- When the policy is less chaotic, gradually reduce the intervention time within an episode and let the policy explore for longer.
|
||||
- Once the policy starts guiding the robot towards achieving the task, even if it's not perfect, you can limit your interventions to simple, quick actions, such as a grasping command or a grasp-and-lift command.
|
||||
|
||||
## 1.6 Monitoring and Debugging
|
||||
|
||||
If you have `wandb.enable` set to `true` in your configuration, you can monitor training progress in real-time through the [Weights & Biases](https://wandb.ai/site/) dashboard.
|
||||
|
||||
# 2. Training a Reward Classifier with LeRobot
|
||||
|
||||
This guide explains how to train a reward classifier for LeRobot's human-in-the-loop reinforcement learning implementation. Reward classifiers learn to predict the reward value given a state, which can be used in an RL setup to train a policy.
|
||||
|
||||
|
||||
The reward classifier implementation in `modeling_classifier.py` uses a pretrained vision model to process the images. It can output either a single value for binary rewards to predict success/fail cases or multiple values for multi-class settings.
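Schematically, such a classifier maps camera images to a success probability. A toy sketch of the idea (not the actual `modeling_classifier.py` implementation) could look like:

```python
import torch
import torch.nn as nn

class TinyRewardClassifier(nn.Module):
    """Toy stand-in: a vision encoder followed by a binary success head."""

    def __init__(self):
        super().__init__()
        # Stands in for a pretrained vision backbone (e.g. a ResNet)
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=2),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
        )
        self.head = nn.Linear(16, 1)  # one logit for binary success/fail

    def forward(self, img: torch.Tensor) -> torch.Tensor:
        return torch.sigmoid(self.head(self.encoder(img)))  # success probability

print(TinyRewardClassifier()(torch.rand(1, 3, 128, 128)))  # e.g. tensor([[0.49]])
```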
|
||||
|
||||
## 2.1 Collecting a Dataset
|
||||
Before training, you need to collect a dataset with labeled examples. The `record_dataset` function in `gym_manipulator.py` enables the process of collecting a dataset of observations, actions, and rewards.
|
||||
|
||||
To collect a dataset, you need to modify some parameters in the environment configuration, based on `HILSerlRobotEnvConfig`.
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/rl/gym_manipulator.py --config_path lerobot/configs/reward_classifier_train_config.json
|
||||
```
|
||||
|
||||
### 2.1.1 Key Parameters for Data Collection:
|
||||
|
||||
- **mode**: set it to "record" to collect a dataset
|
||||
- **repo_id**: "hf_username/dataset_name", name of the dataset and repo on the hub
|
||||
- **num_episodes**: Number of episodes to record
|
||||
- **number_of_steps_after_success**: Number of additional frames to record after a success (reward=1) is detected
|
||||
- **fps**: Number of frames per second to record
|
||||
- **push_to_hub**: Whether to push the dataset to the hub
|
||||
|
||||
The `number_of_steps_after_success` parameter is crucial as it allows you to collect more positive examples. When a success is detected, the system will continue recording for the specified number of steps while maintaining the reward=1 label. Otherwise, there won't be enough states labeled with reward=1 in the dataset to train a good classifier.
|
||||
|
||||
Example configuration section for data collection:
|
||||
|
||||
```json
|
||||
{
|
||||
"mode": "record",
|
||||
"repo_id": "hf_username/dataset_name",
|
||||
"dataset_root": "data/your_dataset",
|
||||
"num_episodes": 20,
|
||||
"push_to_hub": true,
|
||||
"fps": 10,
|
||||
"number_of_steps_after_success": 15
|
||||
}
|
||||
```
|
||||
|
||||
## 2.2 Reward Classifier Configuration
|
||||
|
||||
The reward classifier is configured using `configuration_classifier.py`. Here are the key parameters:
|
||||
|
||||
- **model_name**: Base model architecture (e.g., we mainly use "helper2424/resnet10")
|
||||
- **model_type**: "cnn" or "transformer"
|
||||
- **num_cameras**: Number of camera inputs
|
||||
- **num_classes**: Number of output classes (typically 2 for binary success/failure)
|
||||
- **hidden_dim**: Size of hidden representation
|
||||
- **dropout_rate**: Regularization parameter
|
||||
- **learning_rate**: Learning rate for optimizer
|
||||
|
||||
Example configuration from `reward_classifier_train_config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"policy": {
|
||||
"type": "reward_classifier",
|
||||
"model_name": "helper2424/resnet10",
|
||||
"model_type": "cnn",
|
||||
"num_cameras": 2,
|
||||
"num_classes": 2,
|
||||
"hidden_dim": 256,
|
||||
"dropout_rate": 0.1,
|
||||
"learning_rate": 1e-4,
|
||||
"device": "cuda",
|
||||
"use_amp": true,
|
||||
"input_features": {
|
||||
"observation.images.front": {
|
||||
"type": "VISUAL",
|
||||
"shape": [3, 128, 128]
|
||||
},
|
||||
"observation.images.side": {
|
||||
"type": "VISUAL",
|
||||
"shape": [3, 128, 128]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 2.3 Training the Classifier
|
||||
|
||||
To train the classifier, use the `train.py` script with your configuration:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/train.py --config_path lerobot/configs/reward_classifier_train_config.json
|
||||
```
|
||||
|
||||
## 2.4 Deploying and Testing the Model
|
||||
|
||||
To use your trained reward classifier, configure the `HILSerlRobotEnvConfig` to use your model:
|
||||
|
||||
```python
|
||||
env_config = HILSerlRobotEnvConfig(
|
||||
    reward_classifier_pretrained_path="path_to_your_pretrained_model",
|
||||
# Other environment parameters
|
||||
)
|
||||
```
|
||||
Or set the argument in the JSON config file:
|
||||
|
||||
```json
|
||||
{
|
||||
"reward_classifier_pretrained_path": "path_to_your_pretrained_model"
|
||||
}
|
||||
```
|
||||
|
||||
Run `gym_manipulator.py` to test the model:
|
||||
```bash
|
||||
python lerobot/scripts/rl/gym_manipulator.py --config_path lerobot/configs/env_config.json
|
||||
```
|
||||
|
||||
The reward classifier will automatically provide rewards based on the visual input from the robot's cameras.
|
||||
|
||||
## 2.5 Example Workflow
|
||||
|
||||
1. **Create the configuration files**:
|
||||
Create the necessary json configuration files for the reward classifier and the environment. Check the `json_examples` directory for examples.
|
||||
|
||||
2. **Collect a dataset**:
|
||||
```bash
|
||||
python lerobot/scripts/rl/gym_manipulator.py --config_path lerobot/configs/env_config.json
|
||||
```
|
||||
|
||||
3. **Train the classifier**:
|
||||
```bash
|
||||
python lerobot/scripts/train.py --config_path lerobot/configs/reward_classifier_train_config.json
|
||||
```
|
||||
|
||||
4. **Test the classifier**:
|
||||
```bash
|
||||
python lerobot/scripts/rl/gym_manipulator.py --config_path lerobot/configs/env_config.json
|
||||
```
|
||||
# 3. Using gym_hil Simulation Environments with LeRobot
|
||||
|
||||
This guide explains how to use the `gym_hil` simulation environments as an alternative to real robots when working with the LeRobot framework for Human-In-the-Loop (HIL) reinforcement learning.
|
||||
|
||||
`gym_hil` is a package that provides Gymnasium-compatible simulation environments specifically designed for Human-In-the-Loop reinforcement learning. These environments allow you to:
|
||||
|
||||
- Train policies in simulation to test the RL stack before training on real robots
|
||||
|
||||
- Collect demonstrations in sim using external devices like gamepads or keyboards
|
||||
- Perform human interventions during policy learning
|
||||
|
||||
Currently, the main environment is a Franka Panda robot simulation based on MuJoCo, with tasks like picking up a cube.
|
||||
|
||||
## 3.1 Installation
|
||||
|
||||
First, install the `gym_hil` package within the LeRobot environment:
|
||||
|
||||
```bash
|
||||
pip install gym_hil
|
||||
|
||||
# Or in LeRobot
|
||||
cd lerobot
|
||||
pip install -e .[hilserl]
|
||||
```
|
||||
|
||||
## 3.2 Configuration
|
||||
|
||||
To use `gym_hil` with LeRobot, you need to create a configuration file. An example is provided in `gym_hil_env.json`. Key configuration sections include:
|
||||
|
||||
### 3.2.1 Environment Type and Task
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "hil",
|
||||
"name": "franka_sim",
|
||||
"task": "PandaPickCubeGamepad-v0",
|
||||
"device": "cuda"
|
||||
}
|
||||
```
|
||||
|
||||
Available tasks:
|
||||
- `PandaPickCubeBase-v0`: Basic environment
|
||||
- `PandaPickCubeGamepad-v0`: With gamepad control
|
||||
- `PandaPickCubeKeyboard-v0`: With keyboard control
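A quick smoke test of an environment can be run from Python (a sketch assuming the task ids above are registered with Gymnasium exactly as listed):

```python
import gymnasium as gym
import gym_hil  # noqa: F401  -- importing registers the environments

env = gym.make("PandaPickCubeBase-v0")
obs, info = env.reset()
obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
env.close()
```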
|
||||
|
||||
### 3.2.2 Gym Wrappers Configuration
|
||||
|
||||
```json
|
||||
"wrapper": {
|
||||
"gripper_penalty": -0.02,
|
||||
"control_time_s": 15.0,
|
||||
"use_gripper": true,
|
||||
"fixed_reset_joint_positions": [0.0, 0.195, 0.0, -2.43, 0.0, 2.62, 0.785],
|
||||
"end_effector_step_sizes": {
|
||||
"x": 0.025,
|
||||
"y": 0.025,
|
||||
"z": 0.025
|
||||
},
|
||||
"control_mode": "gamepad"
|
||||
}
|
||||
```
|
||||
|
||||
Important parameters:
|
||||
- `gripper_penalty`: Penalty for excessive gripper movement
|
||||
- `use_gripper`: Whether to enable gripper control
|
||||
- `end_effector_step_sizes`: Size of the steps in the x,y,z axes of the end-effector
|
||||
- `control_mode`: Set to "gamepad" to use a gamepad controller
|
||||
|
||||
## 3.3 Running with HIL RL of LeRobot
|
||||
|
||||
### 3.3.1 Basic Usage
|
||||
|
||||
To run the environment, set `mode` to `null`:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/rl/gym_manipulator.py --config_path path/to/gym_hil_env.json
|
||||
```
|
||||
|
||||
### 3.3.2 Recording a Dataset
|
||||
|
||||
To collect a dataset, set the mode to `record` and define the `repo_id` and the number of episodes to record:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/rl/gym_manipulator.py --config_path path/to/gym_hil_env.json
|
||||
```
|
||||
|
||||
### 3.3.3 Training a Policy
|
||||
|
||||
To train a policy, check out the example JSON in `train_gym_hil_env.json` and run the actor and learner servers:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/rl/actor.py --config_path path/to/train_gym_hil_env.json
|
||||
```
|
||||
|
||||
In a different terminal, run the learner server:
|
||||
|
||||
```bash
|
||||
python lerobot/scripts/rl/learner.py --config_path path/to/train_gym_hil_env.json
|
||||
```
|
||||
|
||||
The simulation environment provides a safe and repeatable way to develop and test your Human-In-the-Loop reinforcement learning components before deploying to real robots.
|
||||
|
||||
Paper citation:
|
||||
|
||||
```
|
||||
@article{luo2024precise,
|
||||
title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning},
|
||||
author={Luo, Jianlan and Xu, Charles and Wu, Jeffrey and Levine, Sergey},
|
||||
journal={arXiv preprint arXiv:2410.21845},
|
||||
year={2024}
|
||||
}
|
||||
```
|
||||
19
docs/source/index.mdx
Normal file
@@ -0,0 +1,19 @@
|
||||
<div class="flex justify-center">
|
||||
<a target="_blank" href="https://huggingface.co/lerobot">
|
||||
<img alt="HuggingFace Expert Acceleration Program" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/lerobot-logo-thumbnail.png" style="width: 100%"></img>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
# LeRobot
|
||||
|
||||
**State-of-the-art machine learning for real-world robotics**
|
||||
|
||||
🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier for entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.
|
||||
|
||||
🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning.
|
||||
|
||||
🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulated environments so that everyone can get started.
|
||||
|
||||
🤗 LeRobot hosts pretrained models and datasets on the LeRobot HuggingFace page.
|
||||
|
||||
Join the LeRobot community on [Discord](https://discord.gg/s3KuuzsPFb)
|
||||
70
docs/source/installation.mdx
Normal file
@@ -0,0 +1,70 @@
|
||||
# Installation
|
||||
|
||||
## Install LeRobot
|
||||
|
||||
Currently only available from source.
|
||||
|
||||
Download our source code:
|
||||
```bash
|
||||
git clone https://github.com/huggingface/lerobot.git
|
||||
cd lerobot
|
||||
```
|
||||
|
||||
Create a virtual environment with Python 3.10, using [`Miniconda`](https://docs.anaconda.com/miniconda/install/#quick-command-line-install)
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10
|
||||
```
|
||||
|
||||
Then activate your conda environment (you have to do this each time you open a shell to use lerobot):
|
||||
```bash
|
||||
conda activate lerobot
|
||||
```
|
||||
|
||||
When using `miniconda`, install `ffmpeg` in your environment:
|
||||
```bash
|
||||
conda install ffmpeg -c conda-forge
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
|
||||
> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
|
||||
> ```bash
|
||||
> conda install ffmpeg=7.1.1 -c conda-forge
|
||||
> ```
|
||||
> - _[On Linux only]_ If you want to bring your own ffmpeg: Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure the ffmpeg binary used by your install is the one you compiled, by checking `which ffmpeg`.
|
||||
|
||||
Install 🤗 LeRobot:
|
||||
```bash
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
If you encounter build errors, you may need to install additional dependencies: `cmake`, `build-essential`, and the ffmpeg libraries.
|
||||
To install these on Linux, run:
|
||||
```bash
|
||||
sudo apt-get install cmake build-essential python-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev
|
||||
```
|
||||
For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
|
||||
|
||||
## Optional dependencies
|
||||
|
||||
LeRobot provides optional extras for specific functionalities. Multiple extras can be combined (e.g., `.[aloha,feetech]`). For all available extras, refer to `pyproject.toml`.
|
||||
|
||||
### Simulations
|
||||
Install environment packages: `aloha` ([gym-aloha](https://github.com/huggingface/gym-aloha)), `xarm` ([gym-xarm](https://github.com/huggingface/gym-xarm)), or `pusht` ([gym-pusht](https://github.com/huggingface/gym-pusht))
|
||||
Example:
|
||||
```bash
|
||||
pip install -e ".[aloha]" # or "[pusht]" for example
|
||||
```
|
||||
|
||||
### Motor Control
|
||||
For Koch v1.1, install the Dynamixel SDK; for SO100/SO101/Moss, install the Feetech SDK.
|
||||
```bash
|
||||
pip install -e ".[feetech]" # or "[dynamixel]" for example
|
||||
```
|
||||
|
||||
### Experiment Tracking
|
||||
To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with
|
||||
```bash
|
||||
wandb login
|
||||
```
|
||||
1
docs/source/koch.mdx
Symbolic link
@@ -0,0 +1 @@
|
||||
../../lerobot/common/robots/koch_follower/koch.mdx
|
||||
1
docs/source/lekiwi.mdx
Symbolic link
@@ -0,0 +1 @@
|
||||
../../lerobot/common/robots/lekiwi/lekiwi.mdx
|
||||
1
docs/source/so100.mdx
Symbolic link
@@ -0,0 +1 @@
|
||||
../../lerobot/common/robots/so100_follower/so100.mdx
|
||||
1
docs/source/so101.mdx
Symbolic link
@@ -0,0 +1 @@
|
||||
../../lerobot/common/robots/so101_follower/so101.mdx
|
||||
BIN
examples.zip
@@ -1,280 +0,0 @@
|
||||
This tutorial explains how to use [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot.
|
||||
|
||||
## Source the parts
|
||||
|
||||
Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials with links to source the parts, instructions to 3D print the parts, and advice if it's your first time printing or if you don't already own a 3D printer.
|
||||
|
||||
**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.
|
||||
|
||||
## Install LeRobot
|
||||
|
||||
On your computer:
|
||||
|
||||
1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
|
||||
```bash
|
||||
mkdir -p ~/miniconda3
|
||||
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
|
||||
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
|
||||
rm ~/miniconda3/miniconda.sh
|
||||
~/miniconda3/bin/conda init bash
|
||||
```
|
||||
|
||||
2. Restart shell or `source ~/.bashrc`
|
||||
|
||||
3. Create and activate a fresh conda environment for lerobot
|
||||
```bash
|
||||
conda create -y -n lerobot python=3.10 && conda activate lerobot
|
||||
```
|
||||
|
||||
4. Clone LeRobot:
|
||||
```bash
|
||||
git clone https://github.com/huggingface/lerobot.git ~/lerobot
|
||||
```
|
||||
|
||||
5. Install LeRobot with dependencies for the feetech motors:
|
||||
```bash
|
||||
cd ~/lerobot && pip install -e ".[feetech]"
|
||||
```
|
||||
|
||||
For Linux only (not Mac), install extra dependencies for recording datasets:
|
||||
```bash
|
||||
conda install -y -c conda-forge ffmpeg
|
||||
pip uninstall -y opencv-python
|
||||
conda install -y -c conda-forge "opencv>=4.10.0"
|
||||
```
|
||||
|
||||
## Configure the motors
|
||||
|
||||
Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I), which illustrates the use of our scripts below.
|
||||
|
||||
**Find USB ports associated to your arms**
|
||||
To find the correct ports for each arm, run the utility script twice:
|
||||
```bash
|
||||
python lerobot/scripts/find_motors_bus_port.py
|
||||
```
|
||||
|
||||
Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect leader arm and press Enter...]
|
||||
|
||||
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
|
||||
```
|
||||
Finding all available ports for the MotorBus.
|
||||
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
|
||||
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
|
||||
|
||||
[...Disconnect follower arm and press Enter...]
|
||||
|
||||
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
|
||||
Reconnect the usb cable.
|
||||
```
|
||||
|
||||
Troubleshooting: On Linux, you might need to give access to the USB ports by running:
|
||||
```bash
|
||||
sudo chmod 666 /dev/ttyACM0
|
||||
sudo chmod 666 /dev/ttyACM1
|
||||
```
|
||||
|
||||
**Configure your motors**
|
||||
Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 1
|
||||
```
|
||||
|
||||
Note: These motors are currently limited. They can only take values between 0 and 4096, which corresponds to a full turn; they can't turn more than that. 2048 is the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) or +2048 steps (180 degrees clockwise) before reaching the limit. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ±2048 steps (±180 degrees).
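For intuition, converting between steps and degrees is straightforward (a small illustrative helper, not part of the LeRobot scripts):

```python
STEPS_PER_TURN = 4096  # these motors encode one full turn as 4096 steps

def steps_to_degrees(steps: int) -> float:
    return steps * 360.0 / STEPS_PER_TURN

print(steps_to_degrees(2048))   # 180.0 -> half a turn from the configured middle
print(steps_to_degrees(-2048))  # -180.0 -> half a turn anticlockwise
```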
|
||||
|
||||
Then unplug your motor and plug the second motor and set its ID to 2.
|
||||
```bash
|
||||
python lerobot/scripts/configure_motor.py \
|
||||
--port /dev/tty.usbmodem58760432961 \
|
||||
--brand feetech \
|
||||
--model sts3215 \
|
||||
--baudrate 1000000 \
|
||||
--ID 2
|
||||
```
|
||||
|
||||
Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.
|
||||
|
||||
**Remove the gears of the 6 leader motors**
|
||||
Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
|
||||
|
||||
**Add motor horn to the motors**
|
||||
Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). For SO-100, you need to align the holes on the motor horn to the motor spline to be approximately 1:30, 4:30, 7:30 and 10:30.
|
||||
Try to avoid rotating the motor while doing so, to keep position 2048 set during configuration. It is especially tricky for the leader motors, as they are more sensitive without the gears, but it's OK if they end up slightly rotated.
|
||||
|
||||
## Assemble the arms
|
||||
|
||||
Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.
|
||||
|
||||
## Calibrate
|
||||
|
||||
Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.
|
||||
|
||||
**Auto-calibration of follower arm**
|
||||
Follow step 5 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the auto-calibration of the follower arm. You first need to manually move your follower arm to this initial position:
|
||||
|
||||
<div style="text-align:center;">
|
||||
<img src="../media/so100/follower_initial.webp?raw=true" alt="SO-100 follower arm initial position" title="SO-100 follower arm initial position" width="50%">
|
||||
</div>
|
||||
|
||||
Then run this script to launch auto-calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py calibrate \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--robot-overrides '~cameras' --arms main_follower
|
||||
```
|
||||
|
||||
Note: You can't run auto-calibration for the leader arm, since we removed the gears. Thus, you will need to manually calibrate the leader arm. It's less precise than auto-calibration, but precision is not as critical for the leader arm.
|
||||
|
||||
**Manual calibration of leader arm**
|
||||
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
|
||||
|
||||
| 1. Zero position | 2. Rotated position | 3. Rest position |
|
||||
|---|---|---|
|
||||
| <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |
|
||||
|
||||
Run this script to launch manual calibration:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py calibrate \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--robot-overrides '~cameras' --arms main_leader
|
||||
```
|
||||
|
||||
## Teleoperate
|
||||
|
||||
**Simple teleop**
|
||||
Then you are ready to teleoperate your robot! Run this simple script (it won't connect and display the cameras):
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py teleoperate \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--robot-overrides '~cameras' \
|
||||
--display-cameras 0
|
||||
```
|
||||
|
||||
|
||||
**Teleop with displaying cameras**
|
||||
Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py teleoperate \
|
||||
--robot-path lerobot/configs/robot/so100.yaml
|
||||
```
|
||||
|
||||
## Record a dataset
|
||||
|
||||
Once you're familiar with teleoperation, you can record your first dataset with SO-100.
|
||||
|
||||
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
|
||||
```bash
|
||||
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
|
||||
```
|
||||
|
||||
Store your Hugging Face repository name in a variable to run these commands:
|
||||
```bash
|
||||
HF_USER=$(huggingface-cli whoami | head -n 1)
|
||||
echo $HF_USER
|
||||
```
|
||||
|
||||
Record 2 episodes and upload your dataset to the hub:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py record \
|
||||
--robot-path lerobot/configs/robot/so100.yaml \
|
||||
--fps 30 \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/so100_test \
|
||||
--tags so100 tutorial \
|
||||
--warmup-time-s 5 \
|
||||
--episode-time-s 40 \
|
||||
--reset-time-s 10 \
|
||||
--num-episodes 2 \
|
||||
--push-to-hub 1
|
||||
```
|
||||
|
||||
## Visualize a dataset
|
||||
|
||||
If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
|
||||
```bash
|
||||
echo ${HF_USER}/so100_test
|
||||
```
|
||||
|
||||
If you didn't upload your dataset (i.e., you used `--push-to-hub 0`), you can also visualize it locally with:
|
||||
```bash
|
||||
python lerobot/scripts/visualize_dataset_html.py \
|
||||
--root data \
|
||||
--repo-id ${HF_USER}/so100_test
|
||||
```

## Replay an episode

Now try to replay the first episode on your robot:
```bash
DATA_DIR=data python lerobot/scripts/control_robot.py replay \
  --robot-path lerobot/configs/robot/so100.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/so100_test \
  --episode 0
```

## Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
DATA_DIR=data python lerobot/scripts/train.py \
  dataset_repo_id=${HF_USER}/so100_test \
  policy=act_so100_real \
  env=so100_real \
  hydra.run.dir=outputs/train/act_so100_test \
  hydra.job.name=act_so100_test \
  device=cuda \
  wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/so100_test`.
2. We provided the policy with `policy=act_so100_real`. This loads configurations from [`lerobot/configs/policy/act_so100_real.yaml`](../lerobot/configs/policy/act_so100_real.yaml). Importantly, this policy uses 2 cameras as input: `laptop` and `phone`.
3. We provided an environment as argument with `env=so100_real`. This loads configurations from [`lerobot/configs/env/so100_real.yaml`](../lerobot/configs/env/so100_real.yaml).
4. We provided `device=cuda` since we are training on an Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.

Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.
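
Before recording evaluation episodes, you can sanity-check that a checkpoint loads from Python. A minimal sketch, assuming the ACT policy class is importable from `lerobot.common.policies.act.modeling_act` (mirroring how `DiffusionPolicy` is loaded in the LeRobot examples):
```python
from lerobot.common.policies.act.modeling_act import ACTPolicy

# Load the last checkpoint written by the training run above.
policy = ACTPolicy.from_pretrained("outputs/train/act_so100_test/checkpoints/last/pretrained_model")
policy.eval()
print(sum(p.numel() for p in policy.parameters()), "parameters")
```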

## Evaluate your policy

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/so100.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/eval_act_so100_test \
  --tags so100 tutorial eval \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 10 \
  -p outputs/train/act_so100_test/checkpoints/last/pretrained_model
```

As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_so100_test`).
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_so100_test`).

## More

Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth tutorial on controlling real robots with LeRobot.

If you have any questions or need help, please reach out on Discord in the channel [`#so100-arm`](https://discord.com/channels/1216765309076115607/1237741463832363039).
@@ -1,280 +0,0 @@

This tutorial explains how to use [Moss v1](https://github.com/jess-moss/moss-robot-arms) with LeRobot.

## Source the parts

Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials with links to source the parts, instructions to 3D print the parts, and advice if it's your first time printing or if you don't already own a 3D printer.

**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.

## Install LeRobot

On your computer:

1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
rm ~/miniconda3/miniconda.sh
~/miniconda3/bin/conda init bash
```

2. Restart your shell or run `source ~/.bashrc`.

3. Create and activate a fresh conda environment for lerobot:
```bash
conda create -y -n lerobot python=3.10 && conda activate lerobot
```

4. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```

5. Install LeRobot with dependencies for the Feetech motors:
```bash
cd ~/lerobot && pip install -e ".[feetech]"
```

For Linux only (not Mac), install extra dependencies for recording datasets:
```bash
conda install -y -c conda-forge ffmpeg
pip uninstall -y opencv-python
conda install -y -c conda-forge "opencv>=4.10.0"
```

## Configure the motors

Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the use of our scripts below.

**Find USB ports associated with your arms**
To find the correct ports for each arm, run the utility script twice:
```bash
python lerobot/scripts/find_motors_bus_port.py
```

Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

[...Disconnect leader arm and press Enter...]

The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```

Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.

[...Disconnect follower arm and press Enter...]

The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```

Troubleshooting: On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
```

**Configure your motors**
Plug in your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 1
```

Note: These motors are currently limited. They can only take values between 0 and 4096, which corresponds to a full turn; they can't rotate beyond that. 2048 is in the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) to reach one end of the range, or +2048 steps (180 degrees clockwise) to reach the other end. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ± 2048 steps (± 180 degrees).
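
To build intuition for the step/degree arithmetic above, here is a tiny sketch (plain Python, not a LeRobot utility) converting between motor steps and degrees for these 0–4096 encoders:
```python
STEPS_PER_TURN = 4096  # one full revolution of the sts3215 encoder
CENTER = 2048          # position set by the configuration script

def steps_to_degrees(steps: int) -> float:
    """Angular offset from the centered position, in degrees."""
    return (steps - CENTER) * 360 / STEPS_PER_TURN

print(steps_to_degrees(0))     # -180.0: anticlockwise limit
print(steps_to_degrees(4096))  # +180.0: clockwise limit
```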

Then unplug your motor, plug in the second motor, and set its ID to 2.
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 2
```

Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm (a scripted version of this loop is sketched below).
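
Since all six motors are configured one at a time on the same bus, you may find it convenient to loop over the IDs from Python. A minimal sketch assuming the port shown above (swap in your own); you still have to physically swap motors between iterations:
```python
import subprocess

PORT = "/dev/tty.usbmodem58760432961"  # assumption: the port found earlier

for motor_id in range(1, 7):
    input(f"Plug in motor {motor_id} alone, then press Enter...")
    subprocess.run(
        ["python", "lerobot/scripts/configure_motor.py",
         "--port", PORT, "--brand", "feetech", "--model", "sts3215",
         "--baudrate", "1000000", "--ID", str(motor_id)],
        check=True,  # abort the loop if a configuration step fails
    )
```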

**Remove the gears of the 6 leader motors**
Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). You need to remove the gears of the leader arm's motors. As a result, you will only use the position encoding of the motors, and friction is reduced so the leader arm is easier to operate.

**Add motor horn to the motors**
Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). For Moss v1, you need to align the holes on the motor horn to the motor spline at approximately 3, 6, 9 and 12 o'clock.
Try to avoid rotating the motor while doing so to keep position 2048 set during configuration. It is especially tricky for the leader motors, which are more sensitive to rotation without the gears, but it's ok if they end up slightly rotated.

## Assemble the arms

Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do the second arm in under 1 hour.

## Calibrate

Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Moss v1 robot to work on another.

**Auto-calibration of follower arm**
Follow step 5 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the auto-calibration of the follower arm. You first need to manually move your follower arm to this initial position:

<div style="text-align:center;">
    <img src="../media/moss/follower_initial.webp?raw=true" alt="Moss v1 follower arm initial position" title="Moss v1 follower arm initial position" width="50%">
</div>

Then run this script to launch auto-calibration:
```bash
python lerobot/scripts/control_robot.py calibrate \
  --robot-path lerobot/configs/robot/moss.yaml \
  --robot-overrides '~cameras' --arms main_follower
```

Note: You can't run auto-calibration for the leader arm, since we removed the gears. Thus, you will need to manually calibrate the leader arm. It's less precise than auto-calibration, but precision is not as critical for the leader arm.

**Manual calibration of leader arm**
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:

| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/moss/leader_zero.webp?raw=true" alt="Moss v1 leader arm zero position" title="Moss v1 leader arm zero position" style="width:100%;"> | <img src="../media/moss/leader_rotated.webp?raw=true" alt="Moss v1 leader arm rotated position" title="Moss v1 leader arm rotated position" style="width:100%;"> | <img src="../media/moss/leader_rest.webp?raw=true" alt="Moss v1 leader arm rest position" title="Moss v1 leader arm rest position" style="width:100%;"> |

Run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py calibrate \
  --robot-path lerobot/configs/robot/moss.yaml \
  --robot-overrides '~cameras' --arms main_leader
```

## Teleoperate

**Simple teleop**
Then you are ready to teleoperate your robot! Run this simple script (it won't connect to or display the cameras):
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/moss.yaml \
  --robot-overrides '~cameras' \
  --display-cameras 0
```

**Teleop with displaying cameras**
Follow [this guide to set up your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/moss.yaml
```

## Record a dataset

Once you're familiar with teleoperation, you can record your first dataset with Moss v1.

If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

Record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/moss.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/moss_test \
  --tags moss tutorial \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 2 \
  --push-to-hub 1
```

## Visualize a dataset

If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id, given by:
```bash
echo ${HF_USER}/moss_test
```

If you didn't upload (i.e. you recorded with `--push-to-hub 0`), you can also visualize the dataset locally with:
```bash
python lerobot/scripts/visualize_dataset_html.py \
  --root data \
  --repo-id ${HF_USER}/moss_test
```

## Replay an episode

Now try to replay the first episode on your robot:
```bash
DATA_DIR=data python lerobot/scripts/control_robot.py replay \
  --robot-path lerobot/configs/robot/moss.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/moss_test \
  --episode 0
```

## Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
DATA_DIR=data python lerobot/scripts/train.py \
  dataset_repo_id=${HF_USER}/moss_test \
  policy=act_moss_real \
  env=moss_real \
  hydra.run.dir=outputs/train/act_moss_test \
  hydra.job.name=act_moss_test \
  device=cuda \
  wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/moss_test`.
2. We provided the policy with `policy=act_moss_real`. This loads configurations from [`lerobot/configs/policy/act_moss_real.yaml`](../lerobot/configs/policy/act_moss_real.yaml). Importantly, this policy uses 2 cameras as input: `laptop` and `phone`.
3. We provided an environment as argument with `env=moss_real`. This loads configurations from [`lerobot/configs/env/moss_real.yaml`](../lerobot/configs/env/moss_real.yaml).
4. We provided `device=cuda` since we are training on an Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.

Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`.

## Evaluate your policy

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/moss.yaml \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/eval_act_moss_test \
  --tags moss tutorial eval \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 10 \
  -p outputs/train/act_moss_test/checkpoints/last/pretrained_model
```

As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_moss_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_moss_test`).
2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_moss_test`).

## More

Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth tutorial on controlling real robots with LeRobot.

If you have any questions or need help, please reach out on Discord in the channel [`#moss-arm`](https://discord.com/channels/1216765309076115607/1275374638985252925).

@@ -1,80 +1,136 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script demonstrates the use of `LeRobotDataset` class for handling and processing robotic datasets from Hugging Face.
It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch.

Features included in this script:
- Loading a dataset and accessing its properties.
- Filtering data by episode number.
- Converting tensor data for visualization.
- Saving video files from dataset frames.
- Viewing a dataset's metadata and exploring its properties.
- Loading an existing dataset from the hub or a subset of it.
- Accessing frames by episode number.
- Using advanced dataset features like timestamp-based frame selection.
- Demonstrating compatibility with PyTorch DataLoader for batch processing.

The script ends with examples of how to batch process data using PyTorch's DataLoader.
"""

from pathlib import Path
from pprint import pprint

import imageio
import torch
from huggingface_hub import HfApi

import lerobot
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata

# We ported a number of existing datasets ourselves, use this to see the list:
print("List of available datasets:")
pprint(lerobot.available_datasets)

# Let's take one for this example
repo_id = "lerobot/pusht"
# You can also browse through the datasets created/ported by the community on the hub using the hub api:
hub_api = HfApi()
repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])]
pprint(repo_ids)

# You can easily load a dataset from a Hugging Face repository
# Or simply explore them in your web browser directly at:
# https://huggingface.co/datasets?other=LeRobot

# Let's take this one for this example
repo_id = "lerobot/aloha_mobile_cabinet"
# We can have a look and fetch its metadata to know more about it:
ds_meta = LeRobotDatasetMetadata(repo_id)

# By instantiating just this class, you can quickly access useful information about the content and the
# structure of the dataset without downloading the actual data yet (only metadata files — which are
# lightweight).
print(f"Total number of episodes: {ds_meta.total_episodes}")
print(f"Average number of frames per episode: {ds_meta.total_frames / ds_meta.total_episodes:.3f}")
print(f"Frames per second used during data collection: {ds_meta.fps}")
print(f"Robot type: {ds_meta.robot_type}")
print(f"keys to access images from cameras: {ds_meta.camera_keys=}\n")

print("Tasks:")
print(ds_meta.tasks)
print("Features:")
pprint(ds_meta.features)

# You can also get a short summary by simply printing the object:
print(ds_meta)

# You can then load the actual dataset from the hub.
# Either load any subset of episodes:
dataset = LeRobotDataset(repo_id, episodes=[0, 10, 11, 23])

# And see how many frames you have:
print(f"Selected episodes: {dataset.episodes}")
print(f"Number of episodes selected: {dataset.num_episodes}")
print(f"Number of frames selected: {dataset.num_frames}")

# Or simply load the entire dataset:
dataset = LeRobotDataset(repo_id)
print(f"Number of episodes selected: {dataset.num_episodes}")
print(f"Number of frames selected: {dataset.num_frames}")

# LeRobotDataset is actually a thin wrapper around an underlying Hugging Face dataset
# (see https://huggingface.co/docs/datasets/index for more information).
print(dataset)
# The previous metadata class is contained in the 'meta' attribute of the dataset:
print(dataset.meta)

# LeRobotDataset actually wraps an underlying Hugging Face dataset
# (see https://huggingface.co/docs/datasets for more information).
print(dataset.hf_dataset)

# And provides additional utilities for robotics and compatibility with Pytorch
print(f"\naverage number of frames per episode: {dataset.num_samples / dataset.num_episodes:.3f}")
print(f"frames per second used during data collection: {dataset.fps=}")
print(f"keys to access images from cameras: {dataset.camera_keys=}\n")

# Access frame indexes associated to first episode
# LeRobot datasets also subclasses PyTorch datasets so you can do everything you know and love from working
# with the latter, like iterating through the dataset.
# The __getitem__ iterates over the frames of the dataset. Since our datasets are also structured by
# episodes, you can access the frame indices of any episode using the episode_data_index. Here, we access
# frame indices associated to the first episode:
episode_index = 0
from_idx = dataset.episode_data_index["from"][episode_index].item()
to_idx = dataset.episode_data_index["to"][episode_index].item()

# LeRobot datasets actually subclass PyTorch datasets so you can do everything you know and love from working
# with the latter, like iterating through the dataset. Here we grab all the image frames.
frames = [dataset[idx]["observation.image"] for idx in range(from_idx, to_idx)]
# Then we grab all the image frames from the first camera:
camera_key = dataset.meta.camera_keys[0]
frames = [dataset[idx][camera_key] for idx in range(from_idx, to_idx)]

# Video frames are now float32 in range [0,1] channel first (c,h,w) to follow pytorch convention. To visualize
# them, we convert to uint8 in range [0,255]
frames = [(frame * 255).type(torch.uint8) for frame in frames]
# and to channel last (h,w,c).
frames = [frame.permute((1, 2, 0)).numpy() for frame in frames]
# The objects returned by the dataset are all torch.Tensors
print(type(frames[0]))
print(frames[0].shape)

# Finally, we save the frames to a mp4 video for visualization.
Path("outputs/examples/1_load_lerobot_dataset").mkdir(parents=True, exist_ok=True)
imageio.mimsave("outputs/examples/1_load_lerobot_dataset/episode_0.mp4", frames, fps=dataset.fps)
# Since we're using pytorch, the shape is in pytorch, channel-first convention (c, h, w).
# We can compare this shape with the information available for that feature
pprint(dataset.features[camera_key])
# In particular:
print(dataset.features[camera_key]["shape"])
# The shape is in (h, w, c) which is a more universal format.

# For many machine learning applications we need to load the history of past observations or trajectories of
# future actions. Our datasets can load previous and future frames for each key/modality, using timestamps
# differences with the current loaded frame. For instance:
delta_timestamps = {
    # loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame
    "observation.image": [-1, -0.5, -0.20, 0],
    # loads 8 state vectors: 1.5 seconds before, 1 second before, ... 20 ms, 10 ms, and current frame
    "observation.state": [-1.5, -1, -0.5, -0.20, -0.10, -0.02, -0.01, 0],
    camera_key: [-1, -0.5, -0.20, 0],
    # loads 6 state vectors: 1.5 seconds before, 1 second before, ... 200 ms, 100 ms, and current frame
    "observation.state": [-1.5, -1, -0.5, -0.20, -0.10, 0],
    # loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 63 frames in the future
    "action": [t / dataset.fps for t in range(64)],
}
# Note that in any case, these delta_timestamps values need to be multiples of (1/fps) so that added to any
# timestamp, you still get a valid timestamp.
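
# Optional sanity check (a small added sketch, not part of the original example): verify that
# every delta is a multiple of 1/fps before building the dataset. The 1e-6 tolerance is arbitrary.
for key, deltas in delta_timestamps.items():
    for delta in deltas:
        steps = delta * dataset.fps
        assert abs(steps - round(steps)) < 1e-6, f"{key}: {delta} is not a multiple of 1/fps"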

dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps)
print(f"\n{dataset[0]['observation.image'].shape=}")  # (4,c,h,w)
print(f"{dataset[0]['observation.state'].shape=}")  # (8,c)
print(f"{dataset[0]['action'].shape=}\n")  # (64,c)
print(f"\n{dataset[0][camera_key].shape=}")  # (4, c, h, w)
print(f"{dataset[0]['observation.state'].shape=}")  # (6, c)
print(f"{dataset[0]['action'].shape=}\n")  # (64, c)

# Finally, our datasets are fully compatible with PyTorch dataloaders and samplers because they are just
# PyTorch datasets.
@@ -84,8 +140,9 @@ dataloader = torch.utils.data.DataLoader(
    batch_size=32,
    shuffle=True,
)

for batch in dataloader:
    print(f"{batch['observation.image'].shape=}")  # (32,4,c,h,w)
    print(f"{batch['observation.state'].shape=}")  # (32,8,c)
    print(f"{batch['action'].shape=}")  # (32,64,c)
    print(f"{batch[camera_key].shape=}")  # (32, 4, c, h, w)
    print(f"{batch['observation.state'].shape=}")  # (32, 6, c)
    print(f"{batch['action'].shape=}")  # (32, 64, c)
    break

@@ -1,6 +1,25 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This scripts demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first.

It requires the installation of the 'gym_pusht' simulation environment. Install it by running:
```bash
pip install -e ".[pusht]"
```
"""

from pathlib import Path
@@ -10,7 +29,6 @@ import gymnasium as gym
import imageio
import numpy
import torch
from huggingface_hub import snapshot_download

from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy

@@ -18,25 +36,15 @@ from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
output_directory = Path("outputs/eval/example_pusht_diffusion")
output_directory.mkdir(parents=True, exist_ok=True)

# Download the diffusion policy for pusht environment
pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht"))
# OR uncomment the following to evaluate a policy from the local outputs/train folder.
# Select your device
device = "cuda"

# Provide the [hugging face repo id](https://huggingface.co/lerobot/diffusion_pusht):
pretrained_policy_path = "lerobot/diffusion_pusht"
# OR a path to a local outputs/train folder.
# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")

policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
policy.eval()

# Check if GPU is available
if torch.cuda.is_available():
    device = torch.device("cuda")
    print("GPU is available. Device set to:", device)
else:
    device = torch.device("cpu")
    print(f"GPU is not available. Device set to: {device}. Inference will be slower than on GPU.")
    # Decrease the number of reverse-diffusion steps (trades off a bit of quality for 10x speed)
    policy.diffusion.num_inference_steps = 10

policy.to(device)

# Initialize evaluation environment to render two observation types:
# an image of the scene and state/position of the agent. The environment
@@ -47,7 +55,17 @@ env = gym.make(
    max_episode_steps=300,
)

# Reset the policy and environmens to prepare for rollout
# We can verify that the shapes of the features expected by the policy match the ones from the observations
# produced by the environment
print(policy.config.input_features)
print(env.observation_space)

# Similarly, we can check that the actions produced by the policy will match the actions expected by the
# environment
print(policy.config.output_features)
print(env.action_space)

# Reset the policy and environments to prepare for rollout
policy.reset()
numpy_observation, info = env.reset(seed=42)

@@ -101,7 +119,7 @@ while not done:
    rewards.append(reward)
    frames.append(env.render())

    # The rollout is considered done when the success state is reach (i.e. terminated is True),
    # The rollout is considered done when the success state is reached (i.e. terminated is True),
    # or the maximum number of iterations is reached (i.e. truncated is True)
    done = terminated | truncated | done
    step += 1

@@ -1,4 +1,18 @@
"""This scripts demonstrates how to train Diffusion Policy on the PushT environment.
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script demonstrates how to train Diffusion Policy on the PushT environment.

Once you have trained a model with this script, you can try to evaluate it on
examples/2_evaluate_pretrained_policy.py
@@ -8,72 +22,99 @@ from pathlib import Path

import torch

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.common.datasets.utils import dataset_to_policy_features
from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
from lerobot.configs.types import FeatureType

# Create a directory to store the training checkpoint.
output_directory = Path("outputs/train/example_pusht_diffusion")
output_directory.mkdir(parents=True, exist_ok=True)

# Number of offline training steps (we'll only do offline training for this example.)
# Adjust as you prefer. 5000 steps are needed to get something worth evaluating.
training_steps = 5000
device = torch.device("cuda")
log_freq = 250
def main():
    # Create a directory to store the training checkpoint.
    output_directory = Path("outputs/train/example_pusht_diffusion")
    output_directory.mkdir(parents=True, exist_ok=True)

# Set up the dataset.
delta_timestamps = {
    # Load the previous image and state at -0.1 seconds before current frame,
    # then load current image and state corresponding to 0.0 second.
    "observation.image": [-0.1, 0.0],
    "observation.state": [-0.1, 0.0],
    # Load the previous action (-0.1), the next action to be executed (0.0),
    # and 14 future actions with a 0.1 seconds spacing. All these actions will be
    # used to supervise the policy.
    "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
}
dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
    # # Select your device
    device = torch.device("cuda")

# Set up the the policy.
# Policies are initialized with a configuration class, in this case `DiffusionConfig`.
# For this example, no arguments need to be passed because the defaults are set up for PushT.
# If you're doing something different, you will likely need to change at least some of the defaults.
cfg = DiffusionConfig()
policy = DiffusionPolicy(cfg, dataset_stats=dataset.stats)
policy.train()
policy.to(device)
    # Number of offline training steps (we'll only do offline training for this example.)
    # Adjust as you prefer. 5000 steps are needed to get something worth evaluating.
    training_steps = 5000
    log_freq = 1

optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
    # When starting from scratch (i.e. not from a pretrained policy), we need to specify 2 things before
    # creating the policy:
    # - input/output shapes: to properly size the policy
    # - dataset stats: for normalization and denormalization of input/outputs
    dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht")
    features = dataset_to_policy_features(dataset_metadata.features)
    output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
    input_features = {key: ft for key, ft in features.items() if key not in output_features}

# Create dataloader for offline training.
dataloader = torch.utils.data.DataLoader(
    dataset,
    num_workers=4,
    batch_size=64,
    shuffle=True,
    pin_memory=device != torch.device("cpu"),
    drop_last=True,
)
    # Policies are initialized with a configuration class, in this case `DiffusionConfig`. For this example,
    # we'll just use the defaults and so no arguments other than input/output features need to be passed.
    cfg = DiffusionConfig(input_features=input_features, output_features=output_features)

# Run training loop.
step = 0
done = False
while not done:
    for batch in dataloader:
        batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}
        output_dict = policy.forward(batch)
        loss = output_dict["loss"]
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    # We can now instantiate our policy with this config and the dataset stats.
    policy = DiffusionPolicy(cfg, dataset_stats=dataset_metadata.stats)
    policy.train()
    policy.to(device)

        if step % log_freq == 0:
            print(f"step: {step} loss: {loss.item():.3f}")
        step += 1
        if step >= training_steps:
            done = True
            break
    # Another policy-dataset interaction is with the delta_timestamps. Each policy expects a given number frames
    # which can differ for inputs, outputs and rewards (if there are some).
    delta_timestamps = {
        "observation.image": [i / dataset_metadata.fps for i in cfg.observation_delta_indices],
        "observation.state": [i / dataset_metadata.fps for i in cfg.observation_delta_indices],
        "action": [i / dataset_metadata.fps for i in cfg.action_delta_indices],
    }

# Save a policy checkpoint.
policy.save_pretrained(output_directory)
    # In this case with the standard configuration for Diffusion Policy, it is equivalent to this:
    delta_timestamps = {
        # Load the previous image and state at -0.1 seconds before current frame,
        # then load current image and state corresponding to 0.0 second.
        "observation.image": [-0.1, 0.0],
        "observation.state": [-0.1, 0.0],
        # Load the previous action (-0.1), the next action to be executed (0.0),
        # and 14 future actions with a 0.1 seconds spacing. All these actions will be
        # used to supervise the policy.
        "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
    }

    # We can then instantiate the dataset with these delta_timestamps configuration.
    dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)

    # Then we create our optimizer and dataloader for offline training.
    optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        num_workers=4,
        batch_size=64,
        shuffle=True,
        pin_memory=device.type != "cpu",
        drop_last=True,
    )

    # Run training loop.
    step = 0
    done = False
    while not done:
        for batch in dataloader:
            batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
            loss, _ = policy.forward(batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if step % log_freq == 0:
                print(f"step: {step} loss: {loss.item():.3f}")
            step += 1
            if step >= training_steps:
                done = True
                break

    # Save a policy checkpoint.
    policy.save_pretrained(output_directory)


if __name__ == "__main__":
    main()

@@ -1,193 +1,223 @@
This tutorial will explain the training script, how to use it, and particularly the use of Hydra to configure everything needed for the training run.
This tutorial will explain the training script, how to use it, and particularly how to configure everything needed for the training run.
> **Note:** The following assumes you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--policy.device=cpu` (`--policy.device=mps`, respectively). However, be advised that the code executes much slower on cpu.

## The training script

LeRobot offers a training script at [`lerobot/scripts/train.py`](../../lerobot/scripts/train.py). At a high level it does the following:
LeRobot offers a training script at [`lerobot/scripts/train.py`](../lerobot/scripts/train.py). At a high level it does the following:

- Loads a Hydra configuration file for the following steps (more on Hydra in a moment).
- Makes a simulation environment.
- Makes a dataset corresponding to that simulation environment.
- Makes a policy.
- Initializes/loads a configuration for the following steps.
- Instantiates a dataset.
- (Optional) Instantiates a simulation environment corresponding to that dataset.
- Instantiates a policy.
- Runs a standard training loop with forward pass, backward pass, optimization step, and occasional logging, evaluation (of the policy on the environment), and checkpointing.

## Basics of how we use Hydra

Explaining the ins and outs of [Hydra](https://hydra.cc/docs/intro/) is beyond the scope of this document, but here we'll share the main points you need to know.

First, `lerobot/configs` has a directory structure like this:

```
.
├── default.yaml
├── env
│   ├── aloha.yaml
│   ├── pusht.yaml
│   └── xarm.yaml
└── policy
    ├── act.yaml
    ├── diffusion.yaml
    └── tdmpc.yaml
```

**_For brevity, in the rest of this document we'll drop the leading `lerobot/configs` path. So `default.yaml` really refers to `lerobot/configs/default.yaml`._**

When you run the training script with
```python
python lerobot/scripts/train.py
```
Hydra is set up to read `default.yaml` (via the `@hydra.main` decorator). If you take a look at the `@hydra.main`'s arguments you will see `config_path="../configs", config_name="default"`. At the top of `default.yaml` is a `defaults` section which looks like this:

```yaml
defaults:
  - _self_
  - env: pusht
  - policy: diffusion
```

## Overview of the configuration system

In the training script, the main function `train` expects a `TrainPipelineConfig` object:
```python
# train.py
@parser.wrap()
def train(cfg: TrainPipelineConfig):
```

You can inspect the `TrainPipelineConfig` defined in [`lerobot/configs/train.py`](../lerobot/configs/train.py) (which is heavily commented and meant to be a reference to understand any option).

When running the script, inputs for the command line are parsed thanks to the `@parser.wrap()` decorator, and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus), which is a tool dedicated to this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and also override their values through command line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization and typing, and lets you manipulate configuration as objects directly in the code, not as dictionaries or namespaces (which enables nice features in an IDE such as autocomplete, jump-to-def, etc.)

Let's have a look at a simplified example. Amongst other attributes, the training config has the following attributes:
```python
@dataclass
class TrainPipelineConfig:
    dataset: DatasetConfig
    env: envs.EnvConfig | None = None
    policy: PreTrainedConfig | None = None
```
in which `DatasetConfig` for example is defined as such:
```python
@dataclass
class DatasetConfig:
    repo_id: str
    episodes: list[int] | None = None
    video_backend: str = "pyav"
```

This logic tells Hydra to incorporate configuration parameters from `env/pusht.yaml` and `policy/diffusion.yaml`. _Note: Be aware of the order, as any configuration parameters with the same name will be overridden. Thus, `default.yaml` is overridden by `env/pusht.yaml`, which is overridden by `policy/diffusion.yaml`._
This creates a hierarchical relationship where, for example, assuming we have a `cfg` instance of `TrainPipelineConfig`, we can access the `repo_id` value with `cfg.dataset.repo_id`.
From the command line, we can specify this value by using a very similar syntax, `--dataset.repo_id=repo/id`.
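
As a minimal sketch of this mechanism (relying on Draccus's documented `draccus.parse` entry point; the simplified field set is an assumption, not the real LeRobot configs):
```python
from dataclasses import dataclass, field

import draccus


@dataclass
class DatasetConfig:
    repo_id: str
    episodes: list[int] | None = None


@dataclass
class TrainPipelineConfig:
    dataset: DatasetConfig = field(default_factory=lambda: DatasetConfig(repo_id="lerobot/pusht"))


# Parses CLI inputs such as --dataset.repo_id=repo/id into the nested dataclass.
cfg = draccus.parse(config_class=TrainPipelineConfig)
print(cfg.dataset.repo_id)
```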

Then, `default.yaml` also contains common configuration parameters such as `device: cuda` or `use_amp: false` (for enabling fp16 training). Some other parameters are set to `???`, which indicates that they are expected to be set in additional yaml files. For instance, `training.offline_steps: ???` in `default.yaml` is set to `200000` in `diffusion.yaml`.
By default, every field takes its default value specified in the dataclass. If a field doesn't have a default value, it needs to be specified either from the command line or from a config file – whose path is also given in the command line (more on this below). In the example above, the `dataset` field doesn't have a default value, which means it must be specified.

Thanks to this `defaults` section in `default.yaml`, if you want to train Diffusion Policy with PushT, you really only need to run:

```bash
python lerobot/scripts/train.py
```

However, you can be more explicit and launch the exact same Diffusion Policy training on PushT with:

```bash
python lerobot/scripts/train.py policy=diffusion env=pusht
```

This way of overriding defaults via the CLI is especially useful when you want to change the policy and/or environment. For instance, you can train ACT on the default Aloha environment with:

```bash
python lerobot/scripts/train.py policy=act env=aloha
```

There are two things to note here:
- Config overrides are passed as `param_name=param_value`.
- Here we have overridden the defaults section. `policy=act` tells Hydra to use `policy/act.yaml`, and `env=aloha` tells Hydra to use `env/aloha.yaml`.

_As an aside: we've set up all of our configurations so that they reproduce state-of-the-art results from papers in the literature._

## Overriding configuration parameters in the CLI

Now let's say that we want to train on a different task in the Aloha environment. If you look in `env/aloha.yaml` you will see something like:

```yaml
# lerobot/configs/env/aloha.yaml
env:
  task: AlohaInsertion-v0
```

And if you look in `policy/act.yaml` you will see something like:

```yaml
# lerobot/configs/policy/act.yaml
dataset_repo_id: lerobot/aloha_sim_insertion_human
```

But our Aloha environment actually supports a cube transfer task as well. To train for this task, you could manually modify the two yaml configuration files respectively.

First, we'd need to switch to using the cube transfer task for the ALOHA environment.

```diff
# lerobot/configs/env/aloha.yaml
env:
-  task: AlohaInsertion-v0
+  task: AlohaTransferCube-v0
```

Then, we'd also need to switch to using the cube transfer dataset.

```diff
# lerobot/configs/policy/act.yaml
-dataset_repo_id: lerobot/aloha_sim_insertion_human
+dataset_repo_id: lerobot/aloha_sim_transfer_cube_human
```

Then, you'd be able to run:

```bash
python lerobot/scripts/train.py policy=act env=aloha
```

and you'd be training and evaluating on the cube transfer task.

An alternative approach to editing the yaml configuration files would be to override the defaults via the command line:
## Specifying values from the CLI

Let's say that we want to train [Diffusion Policy](../lerobot/common/policies/diffusion) on the [pusht](https://huggingface.co/datasets/lerobot/pusht) dataset, using the [gym_pusht](https://github.com/huggingface/gym-pusht) environment for evaluation. The command to do so would look like this:
```bash
python lerobot/scripts/train.py \
  policy=act \
  dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \
  env=aloha \
  env.task=AlohaTransferCube-v0
  --dataset.repo_id=lerobot/pusht \
  --policy.type=diffusion \
  --env.type=pusht
```

There's something new here. Notice the `.` delimiter used to traverse the configuration hierarchy. _But be aware that the `defaults` section is an exception. As you saw above, we didn't need to write `defaults.policy=act` in the CLI. `policy=act` was enough._

Putting all that knowledge together, here's the command that was used to train https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human.
Let's break this down:
- To specify the dataset, we just need to specify its `repo_id` on the hub, which is the only required argument in the `DatasetConfig`. The rest of the fields have default values, and in this case we are fine with those, so we can just add the option `--dataset.repo_id=lerobot/pusht`.
- To specify the policy, we can just select diffusion policy using `--policy` appended with `.type`. Here, `.type` is a special argument which allows us to select config classes inheriting from `draccus.ChoiceRegistry` that have been decorated with the `register_subclass()` method (see the sketch after this list). To have a better explanation of this feature, have a look at this [Draccus demo](https://github.com/dlwh/draccus?tab=readme-ov-file#more-flexible-configuration-with-choice-types). In our code, we use this mechanism mainly to select policies, environments, robots, and some other components like optimizers. The policies available to select are located in [lerobot/common/policies](../lerobot/common/policies)
- Similarly, we select the environment with `--env.type=pusht`. The different environment configs are available in [`lerobot/common/envs/configs.py`](../lerobot/common/envs/configs.py)
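
As a rough sketch of that choice-type pattern (the class and option names here are invented for illustration, and the registry helper methods follow Draccus's documented interface, so treat the details as assumptions):
```python
from dataclasses import dataclass

import draccus


@dataclass
class EnvConfig(draccus.ChoiceRegistry):
    fps: int = 30


@EnvConfig.register_subclass("pusht")
@dataclass
class PushtEnvConfig(EnvConfig):
    task: str = "PushT-v0"


# `--env.type=pusht` selects PushtEnvConfig through this registry.
print(EnvConfig.get_choice_class("pusht"))
```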
|
||||
|
||||
Let's see another example. Let's say you've been training [ACT](../lerobot/common/policies/act) on [lerobot/aloha_sim_insertion_human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) using the [gym-aloha](https://github.com/huggingface/gym-aloha) environment for evaluation with:
|
||||
```bash
|
||||
python lerobot/scripts/train.py \
|
||||
hydra.run.dir=outputs/train/act_aloha_sim_transfer_cube_human \
|
||||
device=cuda
|
||||
env=aloha \
|
||||
env.task=AlohaTransferCube-v0 \
|
||||
dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \
|
||||
policy=act \
|
||||
training.eval_freq=10000 \
|
||||
training.log_freq=250 \
|
||||
training.offline_steps=100000 \
|
||||
training.save_model=true \
|
||||
training.save_freq=25000 \
|
||||
eval.n_episodes=50 \
|
||||
eval.batch_size=50 \
|
||||
wandb.enable=false \
|
||||
--policy.type=act \
|
||||
--dataset.repo_id=lerobot/aloha_sim_insertion_human \
|
||||
--env.type=aloha \
|
||||
--output_dir=outputs/train/act_aloha_insertion
|
||||
```
|
||||
> Notice we added `--output_dir` to explicitly tell where to write outputs from this run (checkpoints, training state, configs etc.). This is not mandatory and if you don't specify it, a default directory will be created from the current date and time, env.type and policy.type. This will typically look like `outputs/train/2025-01-24/16-10-05_aloha_act`.
|
||||
|
||||
There's one new thing here: `hydra.run.dir=outputs/train/act_aloha_sim_transfer_cube_human`, which specifies where to save the training output.
|
||||
|
||||
## Using a configuration file not in `lerobot/configs`
|
||||
|
||||
Above we discusses the our training script is set up such that Hydra looks for `default.yaml` in `lerobot/configs`. But, if you have a configuration file elsewhere in your filesystem you may use:
|
||||
|
||||
We now want to train a different policy for aloha on another task. We'll change the dataset and use [lerobot/aloha_sim_transfer_cube_human](https://huggingface.co/datasets/lerobot/aloha_sim_transfer_cube_human) instead. Of course, we also need to change the task of the environment as well to match this other task.
|
||||
Looking at the [`AlohaEnv`](../lerobot/common/envs/configs.py) config, the task is `"AlohaInsertion-v0"` by default, which corresponds to the task we trained on in the command above. The [gym-aloha](https://github.com/huggingface/gym-aloha?tab=readme-ov-file#description) environment also has the `AlohaTransferCube-v0` task which corresponds to this other task we want to train on. Putting this together, we can train this new policy on this different task using:
|
||||
```bash
|
||||
python lerobot/scripts/train.py --config-dir PARENT/PATH --config-name FILE_NAME_WITHOUT_EXTENSION
|
||||
python lerobot/scripts/train.py \
|
||||
--policy.type=act \
|
||||
--dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
|
||||
--env.type=aloha \
|
||||
--env.task=AlohaTransferCube-v0 \
|
||||
--output_dir=outputs/train/act_aloha_transfer
|
||||
```
|
||||
|
||||
Note: here we use regular syntax for providing CLI arguments to a Python script, not Hydra's `param_name=param_value` syntax.
|
||||
## Loading from a config file
|
||||
|
||||
As a concrete example, this becomes particularly handy when you have a folder with training outputs, and would like to re-run the training. For example, say you previously ran the training script with one of the earlier commands and have `outputs/train/my_experiment/checkpoints/pretrained_model/config.yaml`. This `config.yaml` file will have the full set of configuration parameters within it. To run the training with the same configuration again, do:
|
||||
Now, let's assume that we want to reproduce the run just above. That run has produced a `train_config.json` file in its checkpoints, which serializes the `TrainPipelineConfig` instance it used:
|
||||
```json
|
||||
{
|
||||
"dataset": {
|
||||
"repo_id": "lerobot/aloha_sim_transfer_cube_human",
|
||||
"episodes": null,
|
||||
...
|
||||
},
|
||||
"env": {
|
||||
"type": "aloha",
|
||||
"task": "AlohaTransferCube-v0",
|
||||
"fps": 50,
|
||||
...
|
||||
},
|
||||
"policy": {
|
||||
"type": "act",
|
||||
"n_obs_steps": 1,
|
||||
...
|
||||
},
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
We can then simply load the config values from this file using:
|
||||
```bash
python lerobot/scripts/train.py \
  --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \
  --output_dir=outputs/train/act_aloha_transfer_2
```

`--config_path` is also a special argument which allows you to initialize the config from a local config file. It can point to a directory that contains `train_config.json`, or directly to the config file itself.

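If you want to peek at what `--config_path` will load before launching a run, the file is plain JSON, so the standard library is enough. A minimal sketch (adjust the path to your own run):

```python
# Inspect a checkpoint's serialized TrainPipelineConfig before reusing it.
import json
from pathlib import Path

cfg_path = Path("outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/train_config.json")
cfg = json.loads(cfg_path.read_text())
print(cfg["policy"]["type"])      # "act"
print(cfg["env"]["task"])         # "AlohaTransferCube-v0"
print(cfg["dataset"]["repo_id"])  # "lerobot/aloha_sim_transfer_cube_human"
```
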
Similarly to Hydra, we can still override some parameters in the CLI if we want to, e.g.:
```bash
python lerobot/scripts/train.py \
  --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \
  --output_dir=outputs/train/act_aloha_transfer_2 \
  --policy.n_action_steps=80
```

> Note: While `--output_dir` is not required in general, in this case we need to specify it since it would otherwise take the value from the `train_config.json` (which is `outputs/train/act_aloha_transfer`). To prevent accidental deletion of previous run checkpoints, we raise an error if you're trying to write to an existing directory. This is not the case when resuming a run, which is what you'll learn next.

`--config_path` can also accept the repo_id of a repo on the hub that contains a `train_config.json` file, e.g. running:

```bash
python lerobot/scripts/train.py --config_path=lerobot/diffusion_pusht
```
will start a training run with the same configuration used for training [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht).

## Resume training

Being able to resume a training run is important in case it crashed or was aborted for any reason. We'll demonstrate how to do that here.

Let's reuse the command from the previous run and add a few more options:
```bash
python lerobot/scripts/train.py \
  --policy.type=act \
  --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
  --env.type=aloha \
  --env.task=AlohaTransferCube-v0 \
  --log_freq=25 \
  --save_freq=100 \
  --output_dir=outputs/train/run_resumption
```

Here we've taken care to set the log frequency and checkpointing frequency to low numbers so we can showcase resumption. You should be able to see some logging and have a first checkpoint within 1 minute (depending on hardware). Wait for the first checkpoint to happen; you should see a line that looks like this in your terminal:
```
INFO 2025-01-24 16:10:56 ts/train.py:263 Checkpoint policy after step 100
```

Now let's simulate a crash by killing the process (hit `ctrl`+`c`). We can then simply resume this run from the last checkpoint available with:
```bash
python lerobot/scripts/train.py \
  --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \
  --resume=true
```

You should see from the logging that your training picks up from where it left off.

Another reason you might want to resume a run is simply to extend training and add more training steps. The number of training steps is set by the option `--steps`, which is 100,000 by default.

You could double the number of steps of the previous run with:
```bash
python lerobot/scripts/train.py \
  --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \
  --resume=true \
  --steps=200000
```

## Outputs of a run

In the output directory, there will be a folder called `checkpoints` with the following structure:
```bash
outputs/train/run_resumption/checkpoints
├── 000100  # checkpoint_dir for training step 100
│   ├── pretrained_model/
│   │   ├── config.json  # policy config
│   │   ├── model.safetensors  # policy weights
│   │   └── train_config.json  # train config
│   └── training_state/
│       ├── optimizer_param_groups.json  # optimizer param groups
│       ├── optimizer_state.safetensors  # optimizer state
│       ├── rng_state.safetensors  # rng states
│       ├── scheduler_state.json  # scheduler state
│       └── training_step.json  # training step
├── 000200
└── last -> 000200  # symlink to the last available checkpoint
```

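Since `last` is a plain symlink, you can inspect the checkpoints programmatically with the standard library. A minimal sketch:

```python
# List checkpoint steps and resolve the `last` symlink.
from pathlib import Path

ckpt_root = Path("outputs/train/run_resumption/checkpoints")
steps = sorted(p.name for p in ckpt_root.iterdir() if p.name != "last")
print(steps)  # e.g. ['000100', '000200']
print("last ->", (ckpt_root / "last").resolve().name)  # e.g. 000200
```
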
## Fine-tuning a pre-trained policy

In addition to the features currently in Draccus, we've added a special `.path` argument for the policy, which allows you to load a policy as you would with `PreTrainedPolicy.from_pretrained()`. In that case, `path` can be a local directory that contains a checkpoint, or a repo_id pointing to a pretrained policy on the hub.

For example, we could fine-tune a [policy pre-trained on the aloha transfer task](https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human) on the aloha insertion task. We can achieve this with:
```bash
python lerobot/scripts/train.py \
  --policy.path=lerobot/act_aloha_sim_transfer_cube_human \
  --dataset.repo_id=lerobot/aloha_sim_insertion_human \
  --env.type=aloha \
  --env.task=AlohaInsertion-v0
```

When doing so, keep in mind that the features of the fine-tuning dataset have to match the input/output features of the pretrained policy.

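As a purely hypothetical illustration of that check (the feature names and shapes below are made up for the example, not read from any real policy or dataset):

```python
# Compare the input features a pretrained policy expects against what a
# fine-tuning dataset provides; mismatches should be caught before training.
pretrained_inputs = {
    "observation.images.top": (3, 480, 640),
    "observation.state": (14,),
}
dataset_features = {
    "observation.images.top": (3, 480, 640),
    "observation.state": (14,),
    "action": (14,),
}

missing = set(pretrained_inputs) - set(dataset_features)
mismatched = {
    key
    for key in pretrained_inputs
    if key in dataset_features and pretrained_inputs[key] != dataset_features[key]
}
assert not missing and not mismatched, f"missing={missing}, mismatched={mismatched}"
```
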
## Typical logs and metrics

When you start the training process, you will first see your full configuration being printed in the terminal. You can check it to make sure that you configured your run correctly. The final configuration will also be saved with the checkpoint.

After that, you will see a training log like this one:

```
INFO 2024-08-14 13:35:12 ts/train.py:192 step:0 smpl:64 ep:1 epch:0.00 loss:1.112 grdn:15.387 lr:2.0e-07 updt_s:1.738 data_s:4.774
```

or an evaluation log like this one:

```
INFO 2024-08-14 13:38:45 ts/train.py:226 step:100 smpl:6K ep:52 epch:0.25 ∑rwrd:20.693 success:0.0% eval_s:120.266
```

These logs will also be saved in wandb if `wandb.enable` is set to `true`. Here is the meaning of some abbreviations:

- `smpl`: number of samples seen during training.
- `ep`: number of episodes seen during training. An episode contains multiple samples in a complete manipulation task.
- `epch`: number of times all unique samples have been seen (epoch).

Some metrics are useful for initial performance profiling. For example, if you find that GPU utilization is low (via the `nvidia-smi` command) and `data_s` is sometimes too high, you may need to modify the batch size or the number of dataloading workers to accelerate dataloading. We also recommend the [pytorch profiler](https://github.com/huggingface/lerobot?tab=readme-ov-file#improve-your-code-with-profiling) for detailed performance probing.

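As a minimal sketch of that kind of probing (the dataset below is a stand-in; swap in your own):

```python
# Time how long each iteration waits on the dataloader, analogous to `data_s`.
import time

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(1024, 14))  # stand-in for a real dataset
loader = DataLoader(dataset, batch_size=8, num_workers=4)

start = time.perf_counter()
for step, batch in enumerate(loader):
    data_s = time.perf_counter() - start  # time spent waiting on data
    # ... forward/backward pass would go here ...
    print(f"step:{step} data_s:{data_s:.3f}")
    start = time.perf_counter()
    if step == 10:
        break
```
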
---

## In short

We'll summarize here the main use cases to remember from this tutorial.

#### Train a policy from scratch - CLI
```bash
python lerobot/scripts/train.py \
  --policy.type=act \ # <- select 'act' policy
  --env.type=pusht \ # <- select 'pusht' environment
  --dataset.repo_id=lerobot/pusht # <- train on this dataset
```

#### Train a policy from scratch - config file + CLI
```bash
python lerobot/scripts/train.py \
  --config_path=path/to/pretrained_model \ # <- can also be a repo_id
  --policy.n_action_steps=80 # <- you may still override values
```

#### Resume/continue a training run
```bash
python lerobot/scripts/train.py \
  --config_path=checkpoint/pretrained_model/ \
  --resume=true \
  --steps=200000 # <- you can change some training parameters
```

#### Fine-tuning
```bash
python lerobot/scripts/train.py \
  --policy.path=lerobot/act_aloha_sim_transfer_cube_human \ # <- can also be a local path to a checkpoint
  --dataset.repo_id=lerobot/aloha_sim_insertion_human \
  --env.type=aloha \
  --env.task=AlohaInsertion-v0
```

---

Now that you know the basics of how to train a policy, you might want to know how to apply this knowledge to actual robots, or how to record your own datasets and train policies for your specific task.

If that's the case, head over to the next tutorial [`7_get_started_with_real_robot.md`](./7_get_started_with_real_robot.md).

Or in the meantime, happy training! 🤗

---

## 2. Configure motors, calibrate arms, teleoperate your Koch v1.1

First, install the additional dependencies required for robots built with dynamixel motors like Koch v1.1 by running one of the following commands (make sure gcc is installed).

Using `pip`:
```bash
pip install -e ".[dynamixel]"
```

Using `poetry`:
```bash
poetry sync --extras "dynamixel"
```

Using `uv`:
```bash
uv sync --extra "dynamixel"
```

You are now ready to plug the 5V power supply to the motor bus of the leader arm (the smaller one), since all its motors only require 5V.

Then plug the 12V power supply to the motor bus of the follower arm.

Finally, connect both arms to your computer via USB. Note that the USB doesn't provide any power, and both arms need to be plugged in with their associated power supply to be detected by your computer.

*Copy pasting python code*

> **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.

In the upcoming sections, you'll learn about our classes and functions by running some python code, in an interactive session or by copy-pasting it in a python file. If this is your first time using the tutorial, we highly recommend going through these steps to get deeper intuition about how things work. Once you're more familiar, you can streamline the process by directly running the teleoperate script (which is detailed further in the tutorial):
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=koch \
  --control.type=teleoperate
```

It will automatically:
1. Identify any missing calibrations and initiate the calibration procedure.
2. Connect the robot and start teleoperation.

### a. Control your motors with DynamixelMotorsBus

You can use the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py) to communicate with the motors connected as a chain to the corresponding USB bus. This class leverages the Python [Dynamixel SDK](https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20) to facilitate reading from and writing to the motors.

**First Configuration of your motors**

You will need to unplug each motor in turn and run a command to identify it. Each motor will save its own identification, so you only need to do this once. Start by unplugging all of the motors.

Do the leader arm first, as all of its motors are of the same type. Plug in the first motor of your leader arm and run this script to set its ID to 1.
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand dynamixel \
  --model xl330-m288 \
  --baudrate 1000000 \
  --id 1
```

Then unplug the first motor, plug in the second motor, and set its ID to 2.
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand dynamixel \
  --model xl330-m288 \
  --baudrate 1000000 \
  --id 2
```

Redo the process for all your motors until ID 6.

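If you prefer, you can script this repetition. Below is an optional convenience sketch (not part of lerobot) that shells out to the same `configure_motor.py` script for IDs 2 through 6, pausing so you can swap the plugged-in motor:

```python
# Configure leader-arm motor IDs 2..6 one at a time.
import subprocess

for motor_id in range(2, 7):
    input(f"Plug in ONLY the motor that should get ID {motor_id}, then press Enter...")
    subprocess.run(
        [
            "python", "lerobot/scripts/configure_motor.py",
            "--port", "/dev/tty.usbmodem58760432961",
            "--brand", "dynamixel",
            "--model", "xl330-m288",
            "--baudrate", "1000000",
            "--id", str(motor_id),
        ],
        check=True,
    )
```
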
The process for the follower arm is almost the same, but the follower arm has two types of motors. For the first two motors, make sure you set the model to `xl430-w250`. _Important: configuring follower motors requires plugging and unplugging power. Make sure you use the 5V power for the XL330s and the 12V power for the XL430s!_

After all of your motors are configured properly, you're ready to plug them all together in a daisy-chain as shown in the original video.

**Instantiate the DynamixelMotorsBus**

To begin, create two instances of [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py), one for each arm, using their corresponding USB ports (e.g. `DynamixelMotorsBus(port="/dev/tty.usbmodem575E0031751")`).

```
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```

Troubleshooting: On Linux, you might need to give access to the USB ports by running this command with your ports:
```bash
sudo chmod 666 /dev/tty.usbmodem575E0032081
sudo chmod 666 /dev/tty.usbmodem575E0031751
```

*Listing and Configuring Motors*

Next, you'll need to list the motors for each arm, including their name, index, and model.

To assign indices to the motors, run this code in an interactive Python session. Replace the `port` values with the ones you identified earlier:
```python
from lerobot.common.robot_devices.motors.configs import DynamixelMotorsBusConfig
from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus

leader_config = DynamixelMotorsBusConfig(
    port="/dev/tty.usbmodem575E0031751",
    motors={
        # name: (index, model)
        "shoulder_pan": (1, "xl330-m077"),
        "shoulder_lift": (2, "xl330-m077"),
        "elbow_flex": (3, "xl330-m077"),
        "wrist_flex": (4, "xl330-m077"),
        "wrist_roll": (5, "xl330-m077"),
        "gripper": (6, "xl330-m077"),
    },
)

follower_config = DynamixelMotorsBusConfig(
    port="/dev/tty.usbmodem575E0032081",
    motors={
        # name: (index, model)
        "shoulder_pan": (1, "xl430-w250"),
        "shoulder_lift": (2, "xl430-w250"),
        "elbow_flex": (3, "xl330-m288"),
        "wrist_flex": (4, "xl330-m288"),
        "wrist_roll": (5, "xl330-m288"),
        "gripper": (6, "xl330-m288"),
    },
)

leader_arm = DynamixelMotorsBus(leader_config)
follower_arm = DynamixelMotorsBus(follower_config)
```

IMPORTANTLY: Now that you have your ports, update [`KochRobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like:
```python
@RobotConfig.register_subclass("koch")
@dataclass
class KochRobotConfig(ManipulatorRobotConfig):
    calibration_dir: str = ".cache/calibration/koch"
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    leader_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": DynamixelMotorsBusConfig(
                port="/dev/tty.usbmodem585A0085511",  # <-- UPDATE HERE
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "xl330-m077"],
                    "shoulder_lift": [2, "xl330-m077"],
                    "elbow_flex": [3, "xl330-m077"],
                    "wrist_flex": [4, "xl330-m077"],
                    "wrist_roll": [5, "xl330-m077"],
                    "gripper": [6, "xl330-m077"],
                },
            ),
        }
    )

    follower_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": DynamixelMotorsBusConfig(
                port="/dev/tty.usbmodem585A0076891",  # <-- UPDATE HERE
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "xl430-w250"],
                    "shoulder_lift": [2, "xl430-w250"],
                    "elbow_flex": [3, "xl330-m288"],
                    "wrist_flex": [4, "xl330-m288"],
                    "wrist_roll": [5, "xl330-m288"],
                    "gripper": [6, "xl330-m288"],
                },
            ),
        }
    )
```

Don't forget to set `robot_type: aloha` if you follow this tutorial with an [Aloha bimanual robot](https://aloha-2.github.io) instead of Koch v1.1.

This configuration file is used to instantiate your robot across all scripts. We'll cover how this works later on.

**Connect and Configure your Motors**

Before you can start using your motors, you'll need to configure them to ensure proper communication. When you first connect the motors, the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py) automatically detects any mismatch between the current motor indices (factory set to `1`) and the specified indices (e.g., `1, 2, 3, 4, 5, 6`). This triggers a configuration procedure that requires you to unplug the power cord and motors, then reconnect each motor sequentially, starting from the one closest to the bus.

Steps:
- Scan for devices. All 12 motors should appear.
- Select the motors one by one and move the arm. Check that the graphical indicator near the top right shows the movement.

There is a common issue with the Dynamixel XL430-W250 motors where the motors become undiscoverable after upgrading their firmware from Mac and Windows Dynamixel Wizard 2 applications. When this occurs, you must do a firmware recovery (select `DYNAMIXEL Firmware Recovery` and follow the prompts). There are two known workarounds to conduct this firmware reset:
1) Install the Dynamixel Wizard on a Linux machine and complete the firmware recovery.
2) Use the Dynamixel U2D2 to perform the reset with Windows or Mac. This U2D2 can be purchased [here](https://www.robotis.us/u2d2/).

For either solution, open DYNAMIXEL Wizard 2.0 and select the appropriate port. You will likely be unable to see the motor in the GUI at this time. Select `Firmware Recovery`, carefully choose the correct model, and wait for the process to complete. Finally, re-scan to confirm the firmware recovery was successful.

**Read and Write with DynamixelMotorsBus**

To get familiar with how `DynamixelMotorsBus` communicates with the motors, you can start by reading data from them. Copy-paste this code in the same interactive python session:
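```python
# Minimal stand-in sketch (the original example may differ): read the present
# position of every motor on the leader arm, then on the follower arm, using
# the same `read` call discussed later in this tutorial.
leader_pos = leader_arm.read("Present_Position")
follower_pos = follower_arm.read("Present_Position")
print(leader_pos)    # one value per motor, ordered as in the motors dict
print(follower_pos)
```
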
Alternatively, you can unplug the power cord, which will automatically disable the torque.

**Instantiate the ManipulatorRobot**

Before you can teleoperate your robot, you need to instantiate the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) using the previously defined `leader_config` and `follower_config`.

For the Koch v1.1 robot, we only have one leader, so we refer to it as `"main"` and define it as `leader_arms={"main": leader_config}`. We do the same for the follower arm. For other robots (like the Aloha), which may have two pairs of leader and follower arms, you would define them like this: `leader_arms={"left": left_leader_config, "right": right_leader_config}`. Same thing for the follower arms.

You also need to provide a path to a calibration directory, such as `calibration_dir=".cache/calibration/koch"`. More on this in the next section.

Run the following code to instantiate your manipulator robot:
```python
from lerobot.common.robot_devices.robots.configs import KochRobotConfig
from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot

robot_config = KochRobotConfig(
    leader_arms={"main": leader_config},
    follower_arms={"main": follower_config},
    cameras={},  # We don't use any camera for now
)
robot = ManipulatorRobot(robot_config)
```

The `KochRobotConfig` is used to set the associated settings and calibration process. For instance, we activate the torque of the gripper of the leader Koch v1.1 arm and position it at a 40 degree angle to use it as a trigger.

For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `AlohaRobotConfig` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected for Aloha.

**Calibrate and Connect the ManipulatorRobot**

When you connect your robot for the first time, the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) will detect if the calibration file is missing and trigger the calibration procedure.

Here are the positions you'll move the follower arm to:

| 1. Zero position | 2. Rotated position | 3. Rest position |
| --- | --- | --- |
| <img src="../media/koch/follower_zero.webp?raw=true" alt="Koch v1.1 follower arm zero position" title="Koch v1.1 follower arm zero position" style="width:100%;"> | <img src="../media/koch/follower_rotated.webp?raw=true" alt="Koch v1.1 follower arm rotated position" title="Koch v1.1 follower arm rotated position" style="width:100%;"> | <img src="../media/koch/follower_rest.webp?raw=true" alt="Koch v1.1 follower arm rest position" title="Koch v1.1 follower arm rest position" style="width:100%;"> |

And here are the corresponding positions for the leader arm:

| 1. Zero position | 2. Rotated position | 3. Rest position |
| --- | --- | --- |
| <img src="../media/koch/leader_zero.webp?raw=true" alt="Koch v1.1 leader arm zero position" title="Koch v1.1 leader arm zero position" style="width:100%;"> | <img src="../media/koch/leader_rotated.webp?raw=true" alt="Koch v1.1 leader arm rotated position" title="Koch v1.1 leader arm rotated position" style="width:100%;"> | <img src="../media/koch/leader_rest.webp?raw=true" alt="Koch v1.1 leader arm rest position" title="Koch v1.1 leader arm rest position" style="width:100%;"> |

You can watch a [video tutorial of the calibration procedure](https://youtu.be/8drnU9uRY24) for more details.

During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask you to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure if the values changed negatively or positively.

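As a hypothetical illustration of the arithmetic (this is not the actual lerobot implementation; the tick count and sign conventions are assumptions):

```python
# Toy model of the calibration described above: shift raw motor ticks by a
# per-motor offset, then flip by a per-motor rotation direction.
TICKS_PER_TURN = 4096  # assumed resolution of one full motor rotation

def compute_offset(raw_zero: int) -> int:
    # Choose the offset that re-centers the "zero position" reading at 0.
    return -raw_zero

def compute_direction(raw_zero: int, raw_rotated: int) -> int:
    # +1 if moving to the ~90 degree position increased the raw value, else -1.
    return 1 if raw_rotated > raw_zero else -1

def calibrated_degrees(raw: int, offset: int, direction: int) -> float:
    return direction * (raw + offset) * 360 / TICKS_PER_TURN

offset = compute_offset(raw_zero=2048)
direction = compute_direction(raw_zero=2048, raw_rotated=3072)
print(calibrated_degrees(3072, offset, direction))  # ~90.0
```
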
Finally, the rest position ensures that the follower and leader arms are roughly aligned after calibration, preventing sudden movements that could damage the motors when starting teleoperation.

Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green.

Finally, run this code to instantiate and connect your camera:
```python
from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig
from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera

config = OpenCVCameraConfig(camera_index=0)
camera = OpenCVCamera(config)
camera.connect()
color_image = camera.read()
```

With certain cameras, you can also specify additional parameters like frame rate, resolution, and color mode during instantiation. For instance:
```python
config = OpenCVCameraConfig(camera_index=0, fps=30, width=640, height=480)
```

If the provided arguments are not compatible with the camera, an exception will be raised.

**Instantiate your robot with cameras**

Additionally, you can set up your robot to work with your cameras.

Modify the following Python code with the appropriate camera names and configurations:
```python
robot = ManipulatorRobot(
    KochRobotConfig(
        leader_arms={"main": leader_config},
        follower_arms={"main": follower_config},
        calibration_dir=".cache/calibration/koch",
        cameras={
            "laptop": OpenCVCameraConfig(0, fps=30, width=640, height=480),
            "phone": OpenCVCameraConfig(1, fps=30, width=640, height=480),
        },
    )
)
robot.connect()
```

```
torch.Size([3, 480, 640])
255
```

### d. Use `control_robot.py` and our `teleoperate` function

Instead of manually running the python code in a terminal window, you can use [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to instantiate your robot by providing the robot configurations via the command line, and control your robot with various modes as explained next.

Try running this code to teleoperate your robot (if you don't have a camera, keep reading):
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=koch \
  --control.type=teleoperate
```

You will see a lot of lines appearing like this one:
```
INFO 2024-08-10 11:15:03 ol_robot.py:209 dt: 5.12 (195.1hz) dtRlead: 4.93 (203.0hz) dtWfoll: 0.19 (5239.0hz)
```

It contains:
- `dtRlead: 4.93 (203.0hz)` which is the number of milliseconds it took to read the position of the leader arm using `leader_arm.read("Present_Position")`.
- `dtWfoll: 0.22 (4446.9hz)` which is the number of milliseconds it took to set a new goal position for the follower arm using `follower_arm.write("Goal_position", leader_pos)`; note that writing is done asynchronously, so it takes less time than reading.

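For reference, the rate shown in parentheses is just the inverse of the delta time in milliseconds:

```python
# e.g. dtRlead: 4.93 ms -> roughly 203 Hz
dt_ms = 4.93
print(f"{1000 / dt_ms:.1f}hz")  # 202.8hz
```
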
Importantly: If you don't have any camera, you can remove them dynamically with this [draccus](https://github.com/dlwh/draccus) syntax `--robot.cameras='{}'`:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=koch \
  --robot.cameras='{}' \
  --control.type=teleoperate
```

We advise creating a new config file when the command becomes too long.

Importantly, many utilities are still missing. For instance, if you have cameras, you will need to save the images on disk so you don't run out of RAM, and to do so in threads so you don't slow down communication with your robot. Also, you will need to store your data in a format optimized for training and web sharing like [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py). More on this in the next section.
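
As a minimal sketch of the threaded-saving idea (this is not lerobot's implementation; the paths and image format are assumptions):

```python
# Hand frames off to a background thread pool so disk I/O doesn't block
# the robot control loop.
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

import numpy as np
from PIL import Image

executor = ThreadPoolExecutor(max_workers=4)

def save_frame(frame: np.ndarray, path: Path) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    Image.fromarray(frame).save(path)

# Inside the control loop this returns immediately:
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for camera.read()
executor.submit(save_frame, frame, Path("frames/frame_000000.png"))
executor.shutdown(wait=True)  # flush pending writes when recording ends
```
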
### a. Use the `record` function

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to achieve efficient data recording. It encompasses many recording utilities:
1. Frames from cameras are saved on disk in threads, and encoded into videos at the end of each episode recording.
2. Video streams from cameras are displayed in a window so that you can verify them.
3. Data is stored in the [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py) format, which is pushed to your Hugging Face page (unless `--control.push_to_hub=false` is provided).
4. Checkpoints are done during recording, so if any issue occurs, you can resume recording by re-running the same command again with `--control.resume=true`. You will need to manually delete the dataset directory if you want to start recording from scratch.
5. Set the flow of data recording using command line arguments:
   - `--control.warmup_time_s=10` defines the number of seconds before starting data collection. It allows the robot devices to warmup and synchronize (10 seconds by default).
   - `--control.episode_time_s=60` defines the number of seconds for data recording for each episode (60 seconds by default).
   - `--control.reset_time_s=60` defines the number of seconds for resetting the environment after each episode (60 seconds by default).
   - `--control.num_episodes=50` defines the number of episodes to record (50 by default).
6. Control the flow during data recording using keyboard keys:
   - Press right arrow `->` at any time during episode recording to early stop and go to resetting. Same during resetting, to early stop and to go to the next episode recording.
   - Press left arrow `<-` at any time during episode recording or resetting to early stop, cancel the current episode, and re-record it.
   - Press escape `ESC` at any time during episode recording to end the session early and go straight to video encoding and dataset uploading.
7. Similarly to `teleoperate`, you can also use the command line to override anything.

Before trying `record`, if you want to push your dataset to the hub, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Also, store your Hugging Face repository name in a variable (e.g. `cadene` or `lerobot`):
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

If you don't want to push to hub, use `--control.push_to_hub=false`.

Now run this to record 2 episodes:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=koch \
  --control.type=record \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.fps=30 \
  --control.repo_id=${HF_USER}/koch_test \
  --control.tags='["tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=2 \
  --control.push_to_hub=true
```

This will write your dataset locally to `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `~/.cache/huggingface/lerobot/cadene/koch_test`) and push it on the hub at `https://huggingface.co/datasets/{HF_USER}/{repo-id}`. Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).

You can look for other LeRobot datasets on the hub by searching for `LeRobot` tags: https://huggingface.co/datasets?other=LeRobot
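
To sanity-check the recorded dataset from Python, you can load it back with the `LeRobotDataset` class mentioned above (a sketch; attribute names may vary across lerobot versions):

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Replace with your own ${HF_USER}/koch_test repo_id.
dataset = LeRobotDataset("cadene/koch_test")
print(dataset.num_episodes, dataset.num_frames)
print(dataset[0].keys())  # observation.* and action keys of the first frame
```
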
You will see a lot of lines appearing like this one:
```
INFO 2024-08-10 15:02:58 ol_robot.py:219 dt:33.34 (30.0hz) dtRlead: 5.06 (197.5hz) dtWfoll: 0.25 (3963.7hz) dtRfoll: 6.22 (160.7hz) dtRlaptop: 32.57 (30.7hz) dtRphone: 33.84 (29.5hz)
```

It contains:
- `dtRlead: 5.06 (197.5hz)` which is the delta time of reading the present position of the leader arm.
- `dtWfoll: 0.25 (3963.7hz)` which is the delta time of writing the goal position on the follower arm; writing is asynchronous, so it takes less time than reading.
- `dtRfoll: 6.22 (160.7hz)` which is the delta time of reading the present position on the follower arm.
- `dtRlaptop: 32.57 (30.7hz)` which is the delta time of capturing an image from the laptop camera in the thread running asynchronously.
- `dtRphone: 33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchronously.

Troubleshooting:
- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).

At the end of data recording, your dataset will be uploaded on your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/koch_test), which you can obtain by running:
```bash
echo https://huggingface.co/datasets/${HF_USER}/koch_test
```

### b. Advice for recording a dataset

Once you're comfortable with data recording, it's time to create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings.

You can visualize your dataset by running:
```bash
python lerobot/scripts/visualize_dataset_html.py \
  --repo-id ${HF_USER}/koch_test
```

Note: You might need to add `--local-files-only 1` if your dataset was not uploaded to the Hugging Face hub.

This will launch a local web server that looks like this:
<div style="text-align:center;">
  <img src="../media/tutorial/visualize_dataset_html.webp?raw=true" alt="Koch v1.1 leader and follower arms" title="Koch v1.1 leader and follower arms" width="100%">
</div>

A useful feature of [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) is the `replay` mode, which lets you replay on your robot any episode that you've recorded.

To replay the first episode of the dataset you just recorded, run the following command:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=koch \
  --control.type=replay \
  --control.fps=30 \
  --control.repo_id=${HF_USER}/koch_test \
  --control.episode=0
```

Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
  --dataset.repo_id=${HF_USER}/koch_test \
  --policy.type=act \
  --output_dir=outputs/train/act_koch_test \
  --job_name=act_koch_test \
  --policy.device=cuda \
  --wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/koch_test`.
2. We provided the policy with `--policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions, and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
3. We provided `--policy.device=cuda` since we are training on a Nvidia GPU, but you could use `--policy.device=mps` to train on Apple silicon.
4. We provided `--wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.

For more information on the `train` script, see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md).

### a. Use our `record` function

Ideally, when controlling your robot with your neural network, you would want to record evaluation episodes and to be able to visualize them later on, or even train on them like in Reinforcement Learning. This pretty much corresponds to recording a new dataset but with a neural network providing the actions instead of teleoperation.

To this end, you can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=koch \
  --control.type=record \
  --control.fps=30 \
  --control.repo_id=${HF_USER}/eval_act_koch_test \
  --control.tags='["tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=10 \
  --control.push_to_hub=true \
  --control.policy.path=outputs/train/act_koch_test/checkpoints/last/pretrained_model
```

As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_koch_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_koch_test`).
2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_koch_test`).

### b. Visualize evaluation afterwards

You can then visualize your evaluation dataset by running the same command as before but with the new inference dataset as argument:
```bash
python lerobot/scripts/visualize_dataset.py \
  --root data \
  --repo-id ${HF_USER}/eval_act_koch_test
```

## 6. Next step

@@ -1,37 +0,0 @@
|
||||
This tutorial explains how to resume a training run that you've started with the training script. If you don't know how our training script and configuration system works, please read [4_train_policy_with_script.md](./4_train_policy_with_script.md) first.

## Basic training resumption

Let's consider the example of training ACT for one of the ALOHA tasks. Here's a command that can achieve that:

```bash
python lerobot/scripts/train.py \
    hydra.run.dir=outputs/train/run_resumption \
    policy=act \
    dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \
    env=aloha \
    env.task=AlohaTransferCube-v0 \
    training.log_freq=25 \
    training.save_checkpoint=true \
    training.save_freq=100
```

Here we're using the default dataset and environment for ACT, and we've set the log frequency and checkpointing frequency to low values so we can test resumption. You should be able to see some logging and have a first checkpoint within 1 minute. Please interrupt the training after the first checkpoint.

To resume, all that we have to do is run the training script, providing the run directory and the resume option:

```bash
python lerobot/scripts/train.py \
    hydra.run.dir=outputs/train/run_resumption \
    resume=true
```

You should see from the logging that your training picks up from where it left off.

Note that with `resume=true`, the configuration file from the last checkpoint in the training output directory is loaded. So it doesn't matter that we haven't provided all the other configuration parameters from our previous command (although there may be warnings to notify you that your command has a different configuration than the checkpoint's).

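You can also combine `resume=true` with fresh overrides, for example to extend a run beyond its original step budget. A rough sketch (assuming the `training.offline_steps` override is accepted on resume the same way it is at launch):

```bash
python lerobot/scripts/train.py \
    hydra.run.dir=outputs/train/run_resumption \
    resume=true \
    training.offline_steps=200000
```
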
---

Now you should know how to resume your training run in case it gets interrupted or you want to extend a finished training run.

Happy coding! 🤗

@@ -1,179 +0,0 @@
This tutorial explains how to use [Aloha and Aloha 2 stationary](https://www.trossenrobotics.com/aloha-stationary) with LeRobot.

## Setup

Follow the [documentation from Trossen Robotics](https://docs.trossenrobotics.com/aloha_docs/getting_started/stationary/hardware_setup.html) for setting up the hardware and plugging the 4 arms and 4 cameras into your computer.

## Install LeRobot

On your computer:

1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
rm ~/miniconda3/miniconda.sh
~/miniconda3/bin/conda init bash
```

2. Restart shell or `source ~/.bashrc`

3. Create and activate a fresh conda environment for lerobot
```bash
conda create -y -n lerobot python=3.10 && conda activate lerobot
```

4. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```

5. Install LeRobot with dependencies for the Aloha motors (dynamixel) and cameras (intelrealsense):
```bash
cd ~/lerobot && pip install -e ".[dynamixel, intelrealsense]"
```

For Linux only (not Mac), install extra dependencies for recording datasets:
```bash
conda install -y -c conda-forge ffmpeg
pip uninstall -y opencv-python
conda install -y -c conda-forge "opencv>=4.10.0"
```

## Teleoperate

**/!\ FOR SAFETY, READ THIS /!\**
Teleoperation consists of manually operating the leader arms to move the follower arms. Importantly:
1. Make sure your leader arms are in the same position as the follower arms, so that the follower arms don't move too fast to match the leader arms,
2. Our code assumes that your robot has been assembled following Trossen Robotics instructions. This allows us to skip calibration, as we use the pre-defined calibration files in `.cache/calibration/aloha_default`. If you replace a motor, make sure you follow the exact instructions from Trossen Robotics.

By running the following code, you can start your first **SAFE** teleoperation:
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=5
```

By adding `--robot-overrides max_relative_target=5`, we override the default value for `max_relative_target` defined in `lerobot/configs/robot/aloha.yaml`. It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot-overrides max_relative_target=null` to the command line:
```bash
python lerobot/scripts/control_robot.py teleoperate \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=null
```

## Record a dataset

Once you're familiar with teleoperation, you can record your first dataset with Aloha.

If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

Record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=null \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/aloha_test \
  --tags aloha tutorial \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 2 \
  --push-to-hub 1
```

## Visualize a dataset

If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
```bash
echo ${HF_USER}/aloha_test
```

If you didn't upload with `--push-to-hub 1`, you can also visualize it locally with:
```bash
python lerobot/scripts/visualize_dataset_html.py \
  --root data \
  --repo-id ${HF_USER}/aloha_test
```

## Replay an episode

**/!\ FOR SAFETY, READ THIS /!\**
Replay consists of automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast to reach the first goal positions. For safety, you might want to add `--robot-overrides max_relative_target=5` to your command line as explained above.

Now try to replay the first episode on your robot:
```bash
python lerobot/scripts/control_robot.py replay \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=null \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/aloha_test \
  --episode 0
```

## Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
DATA_DIR=data python lerobot/scripts/train.py \
  dataset_repo_id=${HF_USER}/aloha_test \
  policy=act_aloha_real \
  env=aloha_real \
  hydra.run.dir=outputs/train/act_aloha_test \
  hydra.job.name=act_aloha_test \
  device=cuda \
  wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/aloha_test`.
2. We provided the policy with `policy=act_aloha_real`. This loads configurations from [`lerobot/configs/policy/act_aloha_real.yaml`](../lerobot/configs/policy/act_aloha_real.yaml). Importantly, this policy uses 4 cameras as input: `cam_right_wrist`, `cam_left_wrist`, `cam_high`, and `cam_low`.
3. We provided an environment as argument with `env=aloha_real`. This loads configurations from [`lerobot/configs/env/aloha_real.yaml`](../lerobot/configs/env/aloha_real.yaml). Note: this yaml defines 18 dimensions for `state_dim` and `action_dim`, corresponding to 18 motors, not the 14 motors used in previous Aloha work. This is because we include the `shoulder_shadow` and `elbow_shadow` motors for simplicity.
4. We provided `device=cuda` since we are training on an Nvidia GPU.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you don't provide `DATA_DIR`, your dataset will be downloaded from the Hugging Face hub to your cache folder `$HOME/.cache/huggingface`. In future versions of `lerobot`, both directories will be in sync.

Training should take several hours. You will find checkpoints in `outputs/train/act_aloha_test/checkpoints`.

## Evaluate your policy

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py record \
  --robot-path lerobot/configs/robot/aloha.yaml \
  --robot-overrides max_relative_target=null \
  --fps 30 \
  --root data \
  --repo-id ${HF_USER}/eval_act_aloha_test \
  --tags aloha tutorial eval \
  --warmup-time-s 5 \
  --episode-time-s 40 \
  --reset-time-s 10 \
  --num-episodes 10 \
  --num-image-writer-processes 1 \
  -p outputs/train/act_aloha_test/checkpoints/last/pretrained_model
```

As you can see, it's almost the same command as the one previously used to record your training dataset. Three things changed:
1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_aloha_test`).
2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_aloha_test`).
3. We use `--num-image-writer-processes 1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras to disk allows reaching a consistent 30 fps during inference. Feel free to explore different values for `--num-image-writer-processes`.

## More

Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explanation.

If you have any questions or need help, please reach out on Discord in the channel `#aloha-arm`.

@@ -1,7 +1,21 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script demonstrates how to use torchvision's image transformation with LeRobotDataset for data
augmentation purposes. The transformations are passed to the dataset as an argument upon creation, and
transforms are applied to the observation images before they are returned in the dataset's __getitem__.
"""

from pathlib import Path

@@ -10,17 +24,17 @@ from torchvision.transforms import ToPILImage, v2

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

dataset_repo_id = "lerobot/aloha_static_screw_driver"

# Create a LeRobotDataset with no transformations
dataset = LeRobotDataset(dataset_repo_id, episodes=[0])
# This is equivalent to `dataset = LeRobotDataset(dataset_repo_id, image_transforms=None)`

# Get the index of the first observation in the first episode
first_idx = dataset.episode_data_index["from"][0].item()

# Get the frame corresponding to the first camera
frame = dataset[first_idx][dataset.meta.camera_keys[0]]


# Define the transformations
@@ -28,15 +42,16 @@ transforms = v2.Compose(
    [
        v2.ColorJitter(brightness=(0.5, 1.5)),
        v2.ColorJitter(contrast=(0.5, 1.5)),
        v2.ColorJitter(hue=(-0.1, 0.1)),
        v2.RandomAdjustSharpness(sharpness_factor=2, p=1),
    ]
)

# Create another LeRobotDataset with the defined transformations
transformed_dataset = LeRobotDataset(dataset_repo_id, episodes=[0], image_transforms=transforms)

# Get a frame from the transformed dataset
transformed_frame = transformed_dataset[first_idx][transformed_dataset.meta.camera_keys[0]]

# Create a directory to store output images
output_dir = Path("outputs/image_transforms")
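# The diff hunk ends here. For completeness, a minimal sketch of how the
# original and transformed frames could be written to disk for comparison
# (an assumption for illustration, not part of the shown diff):
output_dir.mkdir(parents=True, exist_ok=True)
to_pil = ToPILImage()
to_pil(frame).save(output_dir / "original_frame.png")
to_pil(transformed_frame).save(output_dir / "transformed_frame.png")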
@@ -1,87 +0,0 @@
# @package _global_

# Change the seed to match what PushT eval uses
# (to avoid evaluating on seeds used for generating the training data).
seed: 100000
# Change the dataset repository to the PushT one.
dataset_repo_id: lerobot/pusht

override_dataset_stats:
  observation.image:
    # stats from imagenet, since we use a pretrained vision model
    mean: [[[0.485]], [[0.456]], [[0.406]]]  # (c,1,1)
    std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)

training:
  offline_steps: 80000
  online_steps: 0
  eval_freq: 10000
  save_freq: 100000
  log_freq: 250
  save_model: true

  batch_size: 8
  lr: 1e-5
  lr_backbone: 1e-5
  weight_decay: 1e-4
  grad_clip_norm: 10
  online_steps_between_rollouts: 1

  delta_timestamps:
    action: "[i / ${fps} for i in range(${policy.chunk_size})]"

eval:
  n_episodes: 50
  batch_size: 50

# See `configuration_act.py` for more details.
policy:
  name: act

  # Input / output structure.
  n_obs_steps: 1
  chunk_size: 100 # chunk_size
  n_action_steps: 100

  input_shapes:
    observation.image: [3, 96, 96]
    observation.state: ["${env.state_dim}"]
  output_shapes:
    action: ["${env.action_dim}"]

  # Normalization / Unnormalization
  input_normalization_modes:
    observation.image: mean_std
    # Use min_max normalization just because it's more standard.
    observation.state: min_max
  output_normalization_modes:
    # Use min_max normalization just because it's more standard.
    action: min_max

  # Architecture.
  # Vision backbone.
  vision_backbone: resnet18
  pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
  replace_final_stride_with_dilation: false
  # Transformer layers.
  pre_norm: false
  dim_model: 512
  n_heads: 8
  dim_feedforward: 3200
  feedforward_activation: relu
  n_encoder_layers: 4
  # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
  # that means only the first layer is used. Here we match the original implementation by setting this to 1.
  # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
  n_decoder_layers: 1
  # VAE.
  use_vae: true
  latent_dim: 32
  n_vae_encoder_layers: 4

  # Inference.
  temporal_ensemble_coeff: null

  # Training and loss computation.
  dropout: 0.1
  kl_weight: 10.0

@@ -1,70 +0,0 @@
In this tutorial we will learn how to adapt a policy configuration to be compatible with a new environment and dataset. As a concrete example, we will adapt the default configuration for ACT to be compatible with the PushT environment and dataset.

If you haven't already read our tutorial on the [training script and configuration tooling](../4_train_policy_with_script.md) please do so prior to tackling this tutorial.

Let's get started!

Suppose we want to train ACT for PushT. Well, there are aspects of the ACT configuration that are specific to the ALOHA environments, and these happen to be incompatible with PushT. Therefore, trying to run the following will almost certainly raise an exception of some sort (e.g. a feature dimension mismatch):

```bash
python lerobot/scripts/train.py policy=act env=pusht dataset_repo_id=lerobot/pusht
```

We need to adapt the parameters of the ACT policy configuration to the PushT environment. The most important ones are the image keys.

ALOHA's datasets and environments typically use a variable number of cameras. In `lerobot/configs/policy/act.yaml` you may notice two relevant sections. Here we show you the minimal diff needed to adjust to PushT:

```diff
override_dataset_stats:
-  observation.images.top:
+  observation.image:
    # stats from imagenet, since we use a pretrained vision model
    mean: [[[0.485]], [[0.456]], [[0.406]]]  # (c,1,1)
    std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)

policy:
  input_shapes:
-    observation.images.top: [3, 480, 640]
+    observation.image: [3, 96, 96]
    observation.state: ["${env.state_dim}"]
  output_shapes:
    action: ["${env.action_dim}"]

  input_normalization_modes:
-    observation.images.top: mean_std
+    observation.image: mean_std
    observation.state: min_max
  output_normalization_modes:
    action: min_max
```

Here we've accounted for the following:
- PushT uses "observation.image" for its image key.
- PushT provides smaller images.

_Side note: technically we could override these via the CLI, but with many changes it gets a bit messy, and we also have a bit of a challenge in that we're using `.` in our observation keys, which is treated by Hydra as a hierarchical separator_.

For your convenience, we provide [`act_pusht.yaml`](./act_pusht.yaml) in this directory. It contains the diff above, plus some other (optional) changes that are explained within. Please copy it into `lerobot/configs/policy` with:

```bash
cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/act_pusht.yaml
```

(remember from a [previous tutorial](../4_train_policy_with_script.md) that Hydra will look in the `lerobot/configs` directory). Now try running the following.

<!-- Note to contributor: are you changing this command? Note that it's tested in `Makefile`, so change it there too! -->
```bash
python lerobot/scripts/train.py policy=act_pusht env=pusht
```

Notice that this is much the same as the command that failed at the start of the tutorial, only:
- Now we are using `policy=act_pusht` to point to our new configuration file.
- We can drop `dataset_repo_id=lerobot/pusht` as the change is incorporated in our new configuration file.

Hurrah! You're now training ACT for the PushT environment.

---

The bottom line of this tutorial is that when training policies for different environments and datasets you will need to understand what parts of the policy configuration are specific to those, and make changes accordingly.

Happy coding! 🤗

@@ -1,3 +1,17 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script demonstrates how to slice a dataset and calculate the loss on a subset of the data.

This technique can be useful for debugging and testing purposes, as well as identifying whether a policy
@@ -9,82 +23,82 @@ on the target environment, whether that be in simulation or the real world.
"""

import math
from pathlib import Path

import torch

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy


def main():
    device = torch.device("cuda")

    # Download the diffusion policy for pusht environment
    pretrained_policy_path = "lerobot/diffusion_pusht"
    # OR uncomment the following to evaluate a policy from the local outputs/train folder.
    # pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")

    policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
    policy.eval()
    policy.to(device)

    # Set up the dataset.
    delta_timestamps = {
        # Load the previous image and state at -0.1 seconds before current frame,
        # then load current image and state corresponding to 0.0 second.
        "observation.image": [-0.1, 0.0],
        "observation.state": [-0.1, 0.0],
        # Load the previous action (-0.1), the next action to be executed (0.0),
        # and 14 future actions with a 0.1 seconds spacing. All these actions will be
        # used to calculate the loss.
        "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
    }

    # Load the last 10% of episodes of the dataset as a validation set.
    # - Load dataset metadata
    dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht")
    # - Calculate train and val episodes
    total_episodes = dataset_metadata.total_episodes
    episodes = list(range(dataset_metadata.total_episodes))
    num_train_episodes = math.floor(total_episodes * 90 / 100)
    train_episodes = episodes[:num_train_episodes]
    val_episodes = episodes[num_train_episodes:]
    print(f"Number of episodes in full dataset: {total_episodes}")
    print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}")
    print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}")
    # - Load train and val datasets
    train_dataset = LeRobotDataset(
        "lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps
    )
    val_dataset = LeRobotDataset("lerobot/pusht", episodes=val_episodes, delta_timestamps=delta_timestamps)
    print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}")
    print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}")

    # Create dataloader for evaluation.
    val_dataloader = torch.utils.data.DataLoader(
        val_dataset,
        num_workers=4,
        batch_size=64,
        shuffle=False,
        pin_memory=device != torch.device("cpu"),
        drop_last=False,
    )

    # Run validation loop.
    loss_cumsum = 0
    n_examples_evaluated = 0
    for batch in val_dataloader:
        batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}
        loss, _ = policy.forward(batch)

        loss_cumsum += loss.item()
        n_examples_evaluated += batch["index"].shape[0]

    # Calculate the average loss over the validation set.
    average_loss = loss_cumsum / n_examples_evaluated

    print(f"Average loss on validation set: {average_loss:.4f}")


if __name__ == "__main__":
    main()

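To try the example, run it like any other script from the repository root (the exact file name is an assumption based on the `examples/advanced/` layout referenced elsewhere in this document):
```bash
python examples/advanced/2_calculate_validation_loss.py
```
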
@@ -1 +0,0 @@

Bearings
- https://amzn.eu/d/8Xz7m4C
- https://amzn.eu/d/1xOo8re
- https://amzn.eu/d/9LXO205 (17x)
- https://amzn.eu/d/eKGj9gf (2x)

Bike Components
- https://amzn.eu/d/cNiQi0O (1x)

Accessories
- https://amzn.eu/d/ipjCq1R (1x)
- https://amzn.eu/d/0ZMzC3G (1x)

Screws
- https://amzn.eu/d/dzNhSkJ
- https://amzn.eu/d/41AhVIU
- https://amzn.eu/d/8G91txy
- https://amzn.eu/d/9xu0pLa
- https://amzn.eu/d/c5xaClV
- https://amzn.eu/d/7kudpAo
- https://amzn.eu/d/2BEgJFc
- https://amzn.eu/d/4q9RNby
- https://amzn.eu/d/4RE2lPV
- https://amzn.eu/d/63YU0l1

Inserts
- https://amzn.eu/d/7fjOtOC

@@ -1,624 +0,0 @@
# Using the [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot

## Table of Contents

- [A. Source the parts](#a-source-the-parts)
- [B. Install LeRobot](#b-install-lerobot)
- [C. Configure the Motors](#c-configure-the-motors)
- [D. Step-by-Step Assembly Instructions](#d-step-by-step-assembly-instructions)
- [E. Calibrate](#e-calibrate)
- [F. Teleoperate](#f-teleoperate)
- [G. Record a dataset](#g-record-a-dataset)
- [H. Visualize a dataset](#h-visualize-a-dataset)
- [I. Replay an episode](#i-replay-an-episode)
- [J. Train a policy](#j-train-a-policy)
- [K. Evaluate your policy](#k-evaluate-your-policy)
- [L. More Information](#l-more-information)

## A. Source the parts

Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts, and advice if it's your first time printing or if you don't own a 3D printer.

Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly.

## B. Install LeRobot

> [!TIP]
> We use the Command Prompt (cmd) quite a lot. If you are not comfortable using the cmd or want to brush up on using the command line, you can have a look here: [Command line crash course](https://developer.mozilla.org/en-US/docs/Learn_web_development/Getting_started/Environment_setup/Command_line)

On your computer:

#### 1. [Install Miniconda](https://docs.anaconda.com/miniconda/install/#quick-command-line-install):

#### 2. Restart shell
Copy paste into your shell: `source ~/.bashrc`, or for Mac: `source ~/.bash_profile`, or `source ~/.zshrc` if you're using zshell

#### 3. Create and activate a fresh conda environment for lerobot

<details>
<summary><strong>Video install instructions</strong></summary>

<video src="https://github.com/user-attachments/assets/17172d3b-3b64-4b80-9cf1-b2b7c5cbd236"></video>

</details>

```bash
conda create -y -n lerobot python=3.10
```

Then activate your conda environment (do this each time you open a shell to use lerobot!):
```bash
conda activate lerobot
```

#### 4. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```

#### 5. Install ffmpeg in your environment:
When using `miniconda`, install `ffmpeg` in your environment:
```bash
conda install ffmpeg -c conda-forge
```

#### 6. Install LeRobot with dependencies for the feetech motors:
```bash
cd ~/lerobot && pip install -e ".[feetech]"
```

Great :hugs:! You are now done installing LeRobot and we can begin assembling the SO100 arms :robot:.
Every time you want to use LeRobot, you can go to the `~/lerobot` folder where we installed it and run one of the commands.

## C. Configure the motors

> [!NOTE]
> Throughout this tutorial you will find videos on how to do the steps. The full video tutorial can be found here: [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I).

### 1. Find the USB ports associated to each arm

Designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm. It's convenient to label them and write on each motor whether it's for the follower `F` or for the leader `L`, and its ID from 1 to 6 (F1...F6 and L1...L6).

#### a. Run the script to find port

<details>
<summary><strong>Video finding port</strong></summary>
<video src="https://github.com/user-attachments/assets/4a21a14d-2046-4805-93c4-ee97a30ba33f"></video>
<video src="https://github.com/user-attachments/assets/1cc3aecf-c16d-4ff9-aec7-8c175afbbce2"></video>
</details>

To find the port for each bus servo adapter, run the utility script:
```bash
python lerobot/scripts/find_motors_bus_port.py
```

#### b. Example outputs

Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect leader arm and press Enter...]

The port of this MotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your MotorsBus and press Enter when done.

[...Disconnect follower arm and press Enter...]

The port of this MotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```

#### c. Troubleshooting
On Linux, you might need to give access to the USB ports by running:
```bash
sudo chmod 666 /dev/ttyACM0
sudo chmod 666 /dev/ttyACM1
```

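Note that `chmod` only lasts until the device is re-plugged. As a more permanent alternative, you could add a udev rule; this is a hedged sketch rather than part of the original tutorial, and the vendor/product IDs are placeholders you must replace with the values `lsusb` reports for your adapter:
```bash
# /etc/udev/rules.d/99-so100.rules (hypothetical file name)
# Match the bus servo adapter by its USB IDs and make it world read/writable.
SUBSYSTEM=="tty", ATTRS{idVendor}=="xxxx", ATTRS{idProduct}=="yyyy", MODE="0666"
```
Reload the rules with `sudo udevadm control --reload-rules && sudo udevadm trigger` and re-plug the adapter.
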
#### d. Update config file

IMPORTANTLY: Now that you have your ports, update the **port** default values of [`So100RobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like:
```python
@RobotConfig.register_subclass("so100")
@dataclass
class So100RobotConfig(ManipulatorRobotConfig):
    calibration_dir: str = ".cache/calibration/so100"
    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    leader_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem58760431091",  # <-- UPDATE HERE
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )

    follower_arms: dict[str, MotorsBusConfig] = field(
        default_factory=lambda: {
            "main": FeetechMotorsBusConfig(
                port="/dev/tty.usbmodem585A0076891",  # <-- UPDATE HERE
                motors={
                    # name: (index, model)
                    "shoulder_pan": [1, "sts3215"],
                    "shoulder_lift": [2, "sts3215"],
                    "elbow_flex": [3, "sts3215"],
                    "wrist_flex": [4, "sts3215"],
                    "wrist_roll": [5, "sts3215"],
                    "gripper": [6, "sts3215"],
                },
            ),
        }
    )
```

### 2. Assembling the Base
Let's begin with assembling the follower arm base

#### a. Set IDs for all 12 motors

<details>
<summary><strong>Video configuring motor</strong></summary>
<video src="https://github.com/user-attachments/assets/ef9b3317-2e11-4858-b9d3-f0a02fb48ecf"></video>
<video src="https://github.com/user-attachments/assets/f36b5ed5-c803-4ebe-8947-b39278776a0d"></video>
</details>

Plug in your first motor F1 and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate. Replace the text after --port with the corresponding follower control board port and run this command in cmd:
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 1
```

> [!NOTE]
> These motors are currently limited. They can only take values between 0 and 4096, which corresponds to a full turn; they can't turn more than that. 2048 is the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) and reach the maximum range, or take +2048 steps (180 degrees clockwise) and reach the maximum range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ±2048 steps (±180 degrees).

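To build intuition for these raw units, here is a tiny sketch of the step/degree conversion described above (illustrative only, not a script from this repo):
```python
STEPS_PER_TURN = 4096  # one full mechanical turn of the sts3215

def steps_to_degrees(steps: int) -> float:
    # 4096 steps == 360 degrees, so one step is ~0.088 degrees
    return steps * 360.0 / STEPS_PER_TURN

def degrees_to_steps(degrees: float) -> int:
    return round(degrees * STEPS_PER_TURN / 360.0)

assert steps_to_degrees(2048) == 180.0
assert degrees_to_steps(-180.0) == -2048
```
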
Then unplug your motor and plug in the second motor and set its ID to 2.
```bash
python lerobot/scripts/configure_motor.py \
  --port /dev/tty.usbmodem58760432961 \
  --brand feetech \
  --model sts3215 \
  --baudrate 1000000 \
  --ID 2
```

Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm (see the loop sketch below for a shortcut).

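If you prefer, the repetition can be scripted; a rough convenience sketch (assuming the same port for every motor and that you plug in one motor at a time when prompted):
```bash
for id in 1 2 3 4 5 6; do
  read -p "Plug in ONLY motor $id, then press Enter..." _
  python lerobot/scripts/configure_motor.py \
    --port /dev/tty.usbmodem58760432961 \
    --brand feetech \
    --model sts3215 \
    --baudrate 1000000 \
    --ID $id
done
```
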
#### b. Remove the gears of the 6 leader motors

<details>
<summary><strong>Video removing gears</strong></summary>

<video src="https://github.com/user-attachments/assets/0c95b88c-5b85-413d-ba19-aee2f864f2a7"></video>

</details>

Follow the video for removing gears. You need to remove the gears from the motors of the leader arm. As a result, you will only use the position encoding of the motors, and the reduced friction makes the leader arm easier to operate.

## D. Step-by-Step Assembly Instructions

**Step 1: Clean Parts**
- Remove all support material from the 3D-printed parts.
---

### Additional Guidance

<details>
<summary><strong>Video assembling arms</strong></summary>

<video src="https://github.com/user-attachments/assets/488a39de-0189-4461-9de3-05b015f90cca"></video>

</details>

**Note:**
This video provides visual guidance for assembling the arms, but it doesn't specify when or how to do the wiring. Inserting the cables beforehand is much easier than doing it afterward. The first arm may take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.

---

### First Motor

**Step 2: Insert Wires**
- Insert two wires into the first motor.

<img src="../media/tutorial/img1.jpg" style="height:300px;">

**Step 3: Install in Base**
- Place the first motor into the base.

<img src="../media/tutorial/img2.jpg" style="height:300px;">

**Step 4: Secure Motor**
- Fasten the motor with 4 screws: two from the bottom and two from the top.

**Step 5: Attach Motor Holder**
- Slide over the first motor holder and fasten it using two screws (one on each side).

<img src="../media/tutorial/img4.jpg" style="height:300px;">

**Step 6: Attach Motor Horns**
- Install both motor horns, securing the top horn with a screw. Try not to move the motor position when attaching the motor horn, especially for the leader arms, where we removed the gears.

<img src="../media/tutorial/img5.jpg" style="height:300px;">
<details>
<summary><strong>Video adding motor horn</strong></summary>
<video src="https://github.com/user-attachments/assets/ef3391a4-ad05-4100-b2bd-1699bf86c969"></video>
</details>

**Step 7: Attach Shoulder Part**
- Route one wire to the back of the robot and the other to the left, i.e. towards you in the photo (see photo).
- Attach the shoulder part.

<img src="../media/tutorial/img6.jpg" style="height:300px;">

**Step 8: Secure Shoulder**
- Tighten the shoulder part with 4 screws on top and 4 on the bottom
*(access the bottom holes by turning the shoulder).*

---

### Second Motor Assembly

**Step 9: Install Motor 2**
- Slide the second motor in from the top and link the wire from motor 1 to motor 2.

<img src="../media/tutorial/img8.jpg" style="height:300px;">

**Step 10: Attach Shoulder Holder**
- Add the shoulder motor holder.
- Ensure the wire from motor 1 to motor 2 goes behind the holder while the other wire is routed upward (see photo).
- This part can be tight to assemble; you can use a workbench as in the photos, or a similar setup, to push the part around the motor.

<div style="display: flex;">
  <img src="../media/tutorial/img9.jpg" style="height:250px;">
  <img src="../media/tutorial/img10.jpg" style="height:250px;">
  <img src="../media/tutorial/img12.jpg" style="height:250px;">
</div>

**Step 11: Secure Motor 2**
- Fasten the second motor with 4 screws.

**Step 12: Attach Motor Horn**
- Attach both motor horns to motor 2, again using the horn screw.

**Step 13: Attach Base**
- Install the base attachment using 2 screws.

<img src="../media/tutorial/img11.jpg" style="height:300px;">

**Step 14: Attach Upper Arm**
- Attach the upper arm with 4 screws on each side.

<img src="../media/tutorial/img13.jpg" style="height:300px;">

---

### Third Motor Assembly

**Step 15: Install Motor 3**
- Route the motor cable from motor 2 through the cable holder to motor 3, then secure motor 3 with 4 screws.

**Step 16: Attach Motor Horn**
- Attach both motor horns to motor 3 and again secure one with a horn screw.

<img src="../media/tutorial/img14.jpg" style="height:300px;">

**Step 17: Attach Forearm**
- Connect the forearm to motor 3 using 4 screws on each side.

<img src="../media/tutorial/img15.jpg" style="height:300px;">

---

### Fourth Motor Assembly

**Step 18: Install Motor 4**
- Slide in motor 4, attach the cable from motor 3, and secure the cable in its holder with a screw.

<div style="display: flex;">
  <img src="../media/tutorial/img16.jpg" style="height:300px;">
  <img src="../media/tutorial/img19.jpg" style="height:300px;">
</div>

**Step 19: Attach Motor Holder 4**
- Install the fourth motor holder (a tight fit). Ensure one wire is routed upward and the wire from motor 3 is routed downward (see photo).

<img src="../media/tutorial/img17.jpg" style="height:300px;">

**Step 20: Secure Motor 4 & Attach Horn**
- Fasten motor 4 with 4 screws and attach its motor horns, using a horn screw for one of them.

<img src="../media/tutorial/img18.jpg" style="height:300px;">

---

### Wrist Assembly

**Step 21: Install Motor 5**
- Insert motor 5 into the wrist holder and secure it with 2 front screws.

<img src="../media/tutorial/img20.jpg" style="height:300px;">

**Step 22: Attach Wrist**
- Connect the wire from motor 4 to motor 5, and already insert the other wire for the gripper.
- Secure the wrist to motor 4 using 4 screws on both sides.

<img src="../media/tutorial/img22.jpg" style="height:300px;">

**Step 23: Attach Wrist Horn**
- Install only one motor horn on the wrist motor and secure it with a horn screw.

<img src="../media/tutorial/img23.jpg" style="height:300px;">

---

### Follower Configuration

**Step 24: Attach Gripper**
- Attach the gripper to motor 5.

<img src="../media/tutorial/img24.jpg" style="height:300px;">

**Step 25: Install Gripper Motor**
- Insert the gripper motor, connect the motor wire from motor 5 to motor 6, and secure it with 3 screws on each side.

<img src="../media/tutorial/img25.jpg" style="height:300px;">

**Step 26: Attach Gripper Horn & Claw**
- Attach the motor horns and again use a horn screw.
- Install the gripper claw and secure it with 4 screws on both sides.

<img src="../media/tutorial/img26.jpg" style="height:300px;">

**Step 27: Mount Controller**
- Attach the motor controller on the back.

<div style="display: flex;">
  <img src="../media/tutorial/img27.jpg" style="height:300px;">
  <img src="../media/tutorial/img28.jpg" style="height:300px;">
</div>

*Assembly complete – proceed to Leader arm assembly.*

---

### Leader Configuration

For the leader configuration, perform **Steps 1–23**. Make sure that you have removed the motor gears from the motors.

**Step 24: Attach Leader Holder**
- Mount the leader holder onto the wrist and secure it with a screw.

<img src="../media/tutorial/img29.jpg" style="height:300px;">

**Step 25: Attach Handle**
- Attach the handle to motor 5 using 4 screws.

<img src="../media/tutorial/img30.jpg" style="height:300px;">

**Step 26: Install Gripper Motor**
- Insert the gripper motor, secure it with 3 screws on each side, attach a motor horn using a horn screw, and connect the motor wire.

<img src="../media/tutorial/img31.jpg" style="height:300px;">

**Step 27: Attach Trigger**
- Attach the follower trigger with 4 screws.

<img src="../media/tutorial/img32.jpg" style="height:300px;">

**Step 28: Mount Controller**
- Attach the motor controller on the back.

<div style="display: flex;">
  <img src="../media/tutorial/img27.jpg" style="height:300px;">
  <img src="../media/tutorial/img28.jpg" style="height:300px;">
</div>

*Assembly complete – proceed to calibration.*

## E. Calibrate

Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another.

#### a. Manual calibration of follower arm

> [!IMPORTANT]
> Contrary to step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724), which illustrates the auto calibration, we will actually do a manual calibration of the follower for now.

You will need to move the follower arm to these positions sequentially:

| 1. Zero position | 2. Rotated position | 3. Rest position |
| --- | --- | --- |
| <img src="../media/so100/follower_zero.webp?raw=true" alt="SO-100 follower arm zero position" title="SO-100 follower arm zero position" style="width:100%;"> | <img src="../media/so100/follower_rotated.webp?raw=true" alt="SO-100 follower arm rotated position" title="SO-100 follower arm rotated position" style="width:100%;"> | <img src="../media/so100/follower_rest.webp?raw=true" alt="SO-100 follower arm rest position" title="SO-100 follower arm rest position" style="width:100%;"> |

Make sure both arms are connected and run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --robot.cameras='{}' \
  --control.type=calibrate \
  --control.arms='["main_follower"]'
```

#### b. Manual calibration of leader arm
Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724), which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:

| 1. Zero position | 2. Rotated position | 3. Rest position |
| --- | --- | --- |
| <img src="../media/so100/leader_zero.webp?raw=true" alt="SO-100 leader arm zero position" title="SO-100 leader arm zero position" style="width:100%;"> | <img src="../media/so100/leader_rotated.webp?raw=true" alt="SO-100 leader arm rotated position" title="SO-100 leader arm rotated position" style="width:100%;"> | <img src="../media/so100/leader_rest.webp?raw=true" alt="SO-100 leader arm rest position" title="SO-100 leader arm rest position" style="width:100%;"> |

Run this script to launch manual calibration:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --robot.cameras='{}' \
  --control.type=calibrate \
  --control.arms='["main_leader"]'
```

## F. Teleoperate

**Simple teleop**
Then you are ready to teleoperate your robot! Run this simple script (it won't connect to and display the cameras):
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --robot.cameras='{}' \
  --control.type=teleoperate
```

#### a. Teleop with displaying cameras
Follow [this guide to set up your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). You will then be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.

> **NOTE:** To visualize the data, enable `--control.display_data=true`. This streams the data using `rerun`.

```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --control.type=teleoperate
```

## G. Record a dataset

Once you're familiar with teleoperation, you can record your first dataset with SO-100.

If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```

Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```

Record 2 episodes and upload your dataset to the hub:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/so100_test \
  --control.tags='["so100","tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=2 \
  --control.push_to_hub=true
```

Note: You can resume recording by adding `--control.resume=true`, as shown in the sketch below.

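For instance, resuming the same recording session might look like this (a sketch that reuses the command above; only the `--control.resume=true` flag is new):
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/so100_test \
  --control.tags='["so100","tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=2 \
  --control.push_to_hub=true \
  --control.resume=true
```
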
## H. Visualize a dataset

If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
```bash
echo ${HF_USER}/so100_test
```

If you didn't upload with `--control.push_to_hub=true`, you can also visualize it locally (a window will open in your browser at `http://127.0.0.1:9090` with the visualization tool):
```bash
python lerobot/scripts/visualize_dataset_html.py \
  --repo-id ${HF_USER}/so100_test \
  --local-files-only 1
```

## I. Replay an episode

Now try to replay the first episode on your robot:
```bash
python lerobot/scripts/control_robot.py \
  --robot.type=so100 \
  --control.type=replay \
  --control.fps=30 \
  --control.repo_id=${HF_USER}/so100_test \
  --control.episode=0
```

## J. Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
  --dataset.repo_id=${HF_USER}/so100_test \
  --policy.type=act \
  --output_dir=outputs/train/act_so100_test \
  --job_name=act_so100_test \
  --policy.device=cuda \
  --wandb.enable=true
```

Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so100_test`.
2. We provided the policy with `--policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
3. We provided `--policy.device=cuda` since we are training on an Nvidia GPU, but you could use `--policy.device=mps` to train on Apple silicon.
4. We provided `--wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.

Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.

To resume training from a checkpoint, below is an example command to resume from the `last` checkpoint of the `act_so100_test` policy:
```bash
python lerobot/scripts/train.py \
  --config_path=outputs/train/act_so100_test/checkpoints/last/pretrained_model/train_config.json \
  --resume=true
```

## K. Evaluate your policy
|
||||
|
||||
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
|
||||
```bash
|
||||
python lerobot/scripts/control_robot.py \
|
||||
--robot.type=so100 \
|
||||
--control.type=record \
|
||||
--control.fps=30 \
|
||||
--control.single_task="Grasp a lego block and put it in the bin." \
|
||||
--control.repo_id=${HF_USER}/eval_act_so100_test \
|
||||
--control.tags='["tutorial"]' \
|
||||
--control.warmup_time_s=5 \
|
||||
--control.episode_time_s=30 \
|
||||
--control.reset_time_s=30 \
|
||||
--control.num_episodes=10 \
|
||||
--control.push_to_hub=true \
|
||||
--control.policy.path=outputs/train/act_so100_test/checkpoints/last/pretrained_model
|
||||
```
|
||||
|
||||
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
|
||||
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so100_test`).
|
||||
2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so100_test`).
|
||||
|
||||
## L. More Information
|
||||
|
||||
Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth tutorial on controlling real robots with LeRobot.
|
||||
|
||||
> [!TIP]
|
||||
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb) in the channel [`#so100-arm`](https://discord.com/channels/1216765309076115607/1237741463832363039).
|
||||
@@ -1,45 +0,0 @@
import time

import numpy as np

from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus, CalibrationMode


def degps_to_raw(degps: float) -> int:
    # Convert degrees/second to the bus's sign-magnitude raw speed:
    # bit 15 carries the direction, the low 15 bits the magnitude in steps.
    steps_per_deg = 4096.0 / 360.0
    speed_in_steps = abs(degps) * steps_per_deg
    speed_int = int(round(speed_in_steps))
    if speed_int > 0x7FFF:
        speed_int = 0x7FFF
    if degps < 0:
        return speed_int | 0x8000
    else:
        return speed_int & 0x7FFF


def raw_to_degps(raw_speed: int) -> float:
    steps_per_deg = 4096.0 / 360.0
    magnitude = raw_speed & 0x7FFF
    degps = magnitude / steps_per_deg
    if raw_speed & 0x8000:
        degps = -degps
    return degps


def main():
    # Instantiate the bus for a single motor on port /dev/ttyACM0.
    arm_bus = FeetechMotorsBus(
        port="/dev/ttyACM0",
        motors={"wrist_pitch": [1, "scs0009"]},
        protocol_version=1,
        group_sync_read=False,  # using individual read calls
    )
    arm_bus.connect()
    # Read the current raw motor position ("Present_Position" is in raw units).
    current_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
    print("Current raw position:", current_raw)
    arm_bus.write("Goal_Position", 1000)
    arm_bus.disconnect()


if __name__ == "__main__":
    main()
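
A quick round-trip check of the two conversion helpers above; the sample values are illustrative and not from the original file:

# Round-trip sanity check: encoding then decoding should agree to within one step.
for degps in (90.0, -45.5, 0.0):
    raw = degps_to_raw(degps)
    assert abs(raw_to_degps(raw) - degps) <= 360.0 / 4096.0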
@@ -1,46 +0,0 @@
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque

# Adjust this to match your actual serial port and baud rate
SERIAL_PORT = '/dev/ttyACM0'  # or COM3 on Windows
BAUD_RATE = 115200

# Set up serial connection
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)

# Buffers for real-time plot
buffer_len = 200
val1_buffer = deque([0]*buffer_len, maxlen=buffer_len)
val2_buffer = deque([0]*buffer_len, maxlen=buffer_len)

# Set up the plot
fig, ax = plt.subplots()
line1, = ax.plot([], [], label='Sensor 0')
line2, = ax.plot([], [], label='Sensor 1')
ax.set_ylim(0, 4096)
ax.set_xlim(0, buffer_len)
ax.legend()

def update(frame):
    while ser.in_waiting:
        line = ser.readline().decode('utf-8').strip()
        parts = line.split()
        if len(parts) >= 2:
            try:
                val1 = int(parts[0])
                val2 = int(parts[1])
                val1_buffer.append(val1)
                val2_buffer.append(val2)
            except ValueError:
                pass  # skip malformed lines

    line1.set_ydata(val1_buffer)
    line1.set_xdata(range(len(val1_buffer)))
    line2.set_ydata(val2_buffer)
    line2.set_xdata(range(len(val2_buffer)))
    return line1, line2

ani = animation.FuncAnimation(fig, update, interval=50)
plt.show()
@@ -1,64 +0,0 @@
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque

# Config
SERIAL_PORT = '/dev/ttyACM1'  # Change as needed
BAUD_RATE = 115200
BUFFER_LEN = 200

# Sensor names in order
sensor_names = [
    "wrist_roll",
    "wrist_pitch",
    "wrist_yaw",
    "elbow_flex",
    "shoulder_roll",
    "shoulder_yaw",
    "shoulder_pitch",
]

# Initialize buffers
sensor_data = {
    name: deque([0]*BUFFER_LEN, maxlen=BUFFER_LEN)
    for name in sensor_names
}

# Setup plot
fig, axes = plt.subplots(len(sensor_names), 1, figsize=(8, 12), sharex=True)
fig.tight_layout(pad=3.0)

lines = {}
for i, name in enumerate(sensor_names):
    axes[i].set_title(name)
    axes[i].set_xlim(0, BUFFER_LEN)
    axes[i].set_ylim(0, 4096)
    line, = axes[i].plot([], [], label=name)
    axes[i].legend()
    lines[name] = line

# Connect to serial
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)

# Update function
def update(frame):
    while ser.in_waiting:
        line = ser.readline().decode().strip()
        parts = line.split()
        if len(parts) != 7:
            continue
        try:
            values = list(map(int, parts))
        except ValueError:
            continue
        for i, name in enumerate(sensor_names):
            sensor_data[name].append(values[i])
    for name in sensor_names:
        x = range(len(sensor_data[name]))
        lines[name].set_data(x, sensor_data[name])
    return lines.values()

# Animate
ani = animation.FuncAnimation(fig, update, interval=50, blit=False)
plt.show()
@@ -1,161 +0,0 @@
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque

# Adjust this to match your actual serial port and baud rate
SERIAL_PORT = '/dev/ttyACM0'  # or COM3 on Windows
BAUD_RATE = 115200

# Set up serial connection
ser = serial.Serial(SERIAL_PORT, BAUD_RATE)

# How many data points to keep in the scrolling buffer
buffer_len = 200

# -------------------------------------------------------------------
# 1) Sensor buffers for existing sensors + new wrist_pitch, wrist_yaw
# -------------------------------------------------------------------
sensor_buffers = {
    'wrist_roll': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len),
    },
    'elbow_pitch': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len),
    },
    'shoulder_pitch': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len),
    },
    'shoulder_yaw': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len),
    },
    'shoulder_roll': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len),
        'val2': deque([0]*buffer_len, maxlen=buffer_len),
    },
    # --- New single-valued sensors ---
    'wrist_pitch': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len)  # Only one line
    },
    'wrist_yaw': {
        'val1': deque([0]*buffer_len, maxlen=buffer_len)  # Only one line
    },
}

# -------------------------------------------------------------------
# 2) Figure with 7 subplots (was 5). We keep the original 5 + 2 new.
# -------------------------------------------------------------------
fig, axes = plt.subplots(7, 1, figsize=(8, 14), sharex=True)
fig.tight_layout(pad=3.0)

# We'll store line references in a dict so we can update them in update().
lines = {}

# -------------------------------------------------------------------
# 3) Define each subplot, including new ones at the end.
# -------------------------------------------------------------------
subplot_info = [
    ('wrist_roll', 'Wrist Roll (2,3)', axes[0]),
    ('elbow_pitch', 'Elbow Pitch (0,1)', axes[1]),
    ('shoulder_pitch', 'Shoulder Pitch (10,11)', axes[2]),
    ('shoulder_yaw', 'Shoulder Yaw (12,13)', axes[3]),
    ('shoulder_roll', 'Shoulder Roll (14,15)', axes[4]),
    ('wrist_pitch', 'Wrist Pitch (0)', axes[5]),  # new
    ('wrist_yaw', 'Wrist Yaw (1)', axes[6]),      # new
]

# Set up each subplot
for (sensor_name, label, ax) in subplot_info:
    ax.set_title(label)
    ax.set_xlim(0, buffer_len)
    ax.set_ylim(0, 4096)  # adjust if needed

    # For existing sensors, plot 2 lines (val1, val2).
    # For the new single-line sensors, plot just 1 line.
    if sensor_name in ['wrist_pitch', 'wrist_yaw']:
        # Single-valued
        line, = ax.plot([], [], label=f"{sensor_name}")
        lines[sensor_name] = line
    else:
        # Pair of values
        line1, = ax.plot([], [], label=f"{sensor_name} - val1")
        line2, = ax.plot([], [], label=f"{sensor_name} - val2")
        lines[sensor_name] = [line1, line2]

    ax.legend()

def update(frame):
    # Read all available lines from the serial buffer
    while ser.in_waiting:
        raw_line = ser.readline().decode('utf-8').strip()
        parts = raw_line.split()

        # We expect at least 16 values if all sensors are present
        # (the highest index read below is 14).
        if len(parts) < 16:
            continue

        try:
            values = list(map(int, parts))
        except ValueError:
            # If there's a parsing error, skip this line
            continue

        # Extract the relevant values and append to the correct buffer
        sensor_buffers['elbow_pitch']['val1'].append(values[13])
        sensor_buffers['elbow_pitch']['val2'].append(values[13])

        sensor_buffers['wrist_roll']['val1'].append(values[3])
        sensor_buffers['wrist_roll']['val2'].append(values[3])

        sensor_buffers['shoulder_pitch']['val1'].append(values[14])
        sensor_buffers['shoulder_pitch']['val2'].append(values[14])

        sensor_buffers['shoulder_yaw']['val1'].append(values[8])
        sensor_buffers['shoulder_yaw']['val2'].append(values[8])

        sensor_buffers['shoulder_roll']['val1'].append(values[10])
        sensor_buffers['shoulder_roll']['val2'].append(values[10])

        # -------------------------------------------------------------------
        # 4) New code: also read wrist_pitch (index 0) and wrist_yaw (index 1)
        # -------------------------------------------------------------------
        sensor_buffers['wrist_pitch']['val1'].append(values[0])
        sensor_buffers['wrist_yaw']['val1'].append(values[1])

    # Update each line's data in each subplot
    all_lines = []
    for (sensor_name, _, ax) in subplot_info:
        # x-values are just the index range of the buffer for val1
        x_data = range(len(sensor_buffers[sensor_name]['val1']))

        # If this sensor has two lines
        if isinstance(lines[sensor_name], list):
            lines[sensor_name][0].set_data(x_data, sensor_buffers[sensor_name]['val1'])
            lines[sensor_name][1].set_data(x_data, sensor_buffers[sensor_name]['val2'])
            all_lines.extend(lines[sensor_name])
        else:
            # Single line only (wrist_pitch, wrist_yaw)
            lines[sensor_name].set_data(x_data, sensor_buffers[sensor_name]['val1'])
            all_lines.append(lines[sensor_name])

    return all_lines

# Create the animation
ani = animation.FuncAnimation(fig, update, interval=50, blit=False)

plt.show()
@@ -1,186 +0,0 @@
from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)
import yaml


class HopeJuniorRobot:
    def __init__(self):
        self.arm_port = "/dev/tty.usbserial-140"
        self.hand_port = "/dev/tty.usbmodem58760436961"
        self.arm_bus = FeetechMotorsBus(
            port=self.arm_port,
            motors={
                # "motor1": (1, "sts3250"),
                # "motor2": (2, "sts3250"),
                # "motor3": (3, "sts3250"),

                # "shoulder_pitch": [1, "sts3215"],
                "shoulder_pitch": [1, "sm8512bl"],
                "shoulder_yaw": [2, "sts3250"],  # TODO: sts3250
                "shoulder_roll": [3, "sts3250"],  # TODO: sts3250
                "elbow_flex": [4, "sts3250"],
                "wrist_roll": [5, "sts3215"],
                "wrist_yaw": [6, "sts3215"],
                "wrist_pitch": [7, "sts3215"],
            },
            protocol_version=0,
        )
        self.hand_bus = FeetechMotorsBus(
            port=self.hand_port,
            motors={
                # Thumb
                "thumb_basel_rotation": [1, "scs0009"],
                "thumb_mcp": [3, "scs0009"],
                "thumb_pip": [4, "scs0009"],
                "thumb_dip": [13, "scs0009"],

                # Index
                "index_thumb_side": [5, "scs0009"],
                "index_pinky_side": [6, "scs0009"],
                "index_flexor": [16, "scs0009"],

                # Middle
                "middle_thumb_side": [8, "scs0009"],
                "middle_pinky_side": [9, "scs0009"],
                "middle_flexor": [2, "scs0009"],

                # Ring
                "ring_thumb_side": [11, "scs0009"],
                "ring_pinky_side": [12, "scs0009"],
                "ring_flexor": [7, "scs0009"],

                # Pinky
                "pinky_thumb_side": [14, "scs0009"],
                "pinky_pinky_side": [15, "scs0009"],
                "pinky_flexor": [10, "scs0009"],
            },
            protocol_version=1,
            group_sync_read=False,
        )

        self.arm_calib_dict = self.get_arm_calibration()
        self.hand_calib_dict = self.get_hand_calibration()

    def apply_arm_config(self, config_file):
        with open(config_file, "r") as file:
            config = yaml.safe_load(file)
        for param, value in config.get("robot", {}).get("arm_bus", {}).items():
            self.arm_bus.write(param, value)

    def apply_hand_config(self, config_file):
        with open(config_file, "r") as file:
            config = yaml.safe_load(file)
        # Write hand settings to the hand bus (not the arm bus).
        for param, value in config.get("robot", {}).get("hand_bus", {}).items():
            self.hand_bus.write(param, value)

    def get_hand_calibration(self):
        homing_offset = [0] * len(self.hand_bus.motor_names)
        drive_mode = [0] * len(self.hand_bus.motor_names)

        start_pos = [
            750,  # thumb_basel_rotation
            100,  # thumb_mcp
            700,  # thumb_pip
            100,  # thumb_dip

            800,  # index_thumb_side
            950,  # index_pinky_side
            0,    # index_flexor

            250,  # middle_thumb_side
            850,  # middle_pinky_side
            0,    # middle_flexor

            850,  # ring_thumb_side
            900,  # ring_pinky_side
            0,    # ring_flexor

            0,    # pinky_thumb_side
            950,  # pinky_pinky_side
            0,    # pinky_flexor
        ]

        end_pos = [
            start_pos[0] - 550,  # thumb_basel_rotation
            start_pos[1] + 400,  # thumb_mcp
            start_pos[2] + 300,  # thumb_pip
            start_pos[3] + 200,  # thumb_dip

            start_pos[4] - 700,  # index_thumb_side
            start_pos[5] - 300,  # index_pinky_side
            start_pos[6] + 600,  # index_flexor

            start_pos[7] + 700,  # middle_thumb_side
            start_pos[8] - 400,  # middle_pinky_side
            start_pos[9] + 600,  # middle_flexor

            start_pos[10] - 600,  # ring_thumb_side
            start_pos[11] - 400,  # ring_pinky_side
            start_pos[12] + 600,  # ring_flexor

            start_pos[13] + 400,  # pinky_thumb_side
            start_pos[14] - 450,  # pinky_pinky_side
            start_pos[15] + 600,  # pinky_flexor
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.hand_bus.motor_names,
        }
        return calib_dict

    def get_arm_calibration(self):
        homing_offset = [0] * len(self.arm_bus.motor_names)
        drive_mode = [0] * len(self.arm_bus.motor_names)

        start_pos = [
            1800,  # shoulder_up
            2800,  # shoulder_forward
            1800,  # shoulder_roll
            1200,  # bend_elbow
            700,   # wrist_roll
            1850,  # wrist_yaw
            1700,  # wrist_pitch
        ]

        end_pos = [
            2800,  # shoulder_up
            3150,  # shoulder_forward
            400,   # shoulder_roll
            2300,  # bend_elbow
            2300,  # wrist_roll
            2150,  # wrist_yaw
            2300,  # wrist_pitch
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.arm_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.arm_bus.motor_names,
        }
        return calib_dict

    def connect_arm(self):
        self.arm_bus.connect()

    def connect_hand(self):
        self.hand_bus.connect()
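
A minimal usage sketch for the class above; the port names are hard-coded in the file, and the `set_calibration` method on the bus is an assumption about the FeetechMotorsBus API, so verify before relying on it:

# Hypothetical quick-start: connect and apply the hard-coded calibration.
robot = HopeJuniorRobot()
robot.connect_arm()
robot.connect_hand()
robot.arm_bus.set_calibration(robot.get_arm_calibration())    # assumed bus method
robot.hand_bus.set_calibration(robot.get_hand_calibration())  # assumed bus method
print(robot.arm_bus.read("Present_Position"))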
@@ -1,730 +0,0 @@
from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)
import serial
import threading
import time
from typing import Callable
import pickle
import cv2
import numpy as np
from collections import deque
import json
import os

LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200


class HomonculusArm:
    def __init__(self, serial_port: str = "/dev/ttyACM1", baud_rate: int = 115200):
        self.serial_port = serial_port
        self.baud_rate = baud_rate
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

        # Number of past values to keep in memory
        self.buffer_size = 10

        # Initialize a buffer (deque) for each joint
        self.joint_buffer = {
            "wrist_roll": deque(maxlen=self.buffer_size),
            "wrist_pitch": deque(maxlen=self.buffer_size),
            "wrist_yaw": deque(maxlen=self.buffer_size),
            "elbow_flex": deque(maxlen=self.buffer_size),
            "shoulder_roll": deque(maxlen=self.buffer_size),
            "shoulder_yaw": deque(maxlen=self.buffer_size),
            "shoulder_pitch": deque(maxlen=self.buffer_size),
        }

        # Start the reading thread
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

        # Last read dictionary
        self.last_d = {
            "wrist_roll": 100,
            "wrist_pitch": 100,
            "wrist_yaw": 100,
            "elbow_flex": 100,
            "shoulder_roll": 100,
            "shoulder_yaw": 100,
            "shoulder_pitch": 100,
        }
        self.calibration = None

        # For adaptive EMA, we store a "previous smoothed" state per joint
        self.adaptive_ema_state = {
            "wrist_roll": None,
            "wrist_pitch": None,
            "wrist_yaw": None,
            "elbow_flex": None,
            "shoulder_roll": None,
            "shoulder_yaw": None,
            "shoulder_pitch": None,
        }

        self.kalman_state = {
            joint: {"x": None, "P": None} for joint in self.joint_buffer.keys()
        }

    @property
    def joint_names(self):
        return list(self.last_d.keys())

    def read(self, motor_names: list[str] | None = None):
        """
        Return the most recent (single) values from self.last_d,
        optionally applying calibration.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Get raw (last) values
        values = np.array([self.last_d[k] for k in motor_names])
        print(values)

        # Apply calibration if available
        if self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            print(values)
        return values

    def read_running_average(self, motor_names: list[str] | None = None, linearize=False):
        """
        Return the AVERAGE of the most recent self.buffer_size (or fewer, if not enough data)
        readings for each joint, optionally applying calibration.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Gather averaged readings from buffers
        smoothed_vals = []
        for name in motor_names:
            buf = self.joint_buffer[name]
            if len(buf) == 0:
                # If no data has been read yet, fall back to last_d
                smoothed_vals.append(self.last_d[name])
            else:
                # Otherwise, average over the existing buffer
                smoothed_vals.append(np.mean(buf))

        smoothed_vals = np.array(smoothed_vals, dtype=np.float32)

        # Apply calibration if available
        if self.calibration is not None:
            if False:  # disabled arcsin linearization experiment
                for i, joint_name in enumerate(motor_names):
                    # Re-use the same raw_min / raw_max from the calibration
                    calib_idx = self.calibration["motor_names"].index(joint_name)
                    min_reading = self.calibration["start_pos"][calib_idx]
                    max_reading = self.calibration["end_pos"][calib_idx]

                    B_value = smoothed_vals[i]
                    print(joint_name)
                    if joint_name == "elbow_flex":
                        print('elbow')
                        try:
                            smoothed_vals[i] = int(
                                min_reading
                                + (max_reading - min_reading)
                                * np.arcsin((B_value - min_reading) / (max_reading - min_reading))
                                / (np.pi / 2)
                            )
                        except ValueError:
                            print('not working')
                            print(smoothed_vals)
            smoothed_vals = self.apply_calibration(smoothed_vals, motor_names)
        return smoothed_vals

    def read_kalman_filter(
        self,
        Q: float = 1.0,
        R: float = 100.0,
        motor_names: list[str] | None = None,
    ) -> np.ndarray:
        """
        Return a Kalman-filtered reading for each requested joint.

        We store a separate Kalman filter (x, P) per joint. For each new measurement Z:
          1) Predict:
             x_pred = x (assuming no motion model)
             P_pred = P + Q
          2) Update:
             K = P_pred / (P_pred + R)
             x = x_pred + K * (Z - x_pred)
             P = (1 - K) * P_pred

        :param Q: Process noise. Larger Q means the estimate can change more freely.
        :param R: Measurement noise. Larger R means we trust our sensor less.
        :param motor_names: If not specified, all joints are filtered.
        :return: Kalman-filtered positions as a numpy array.
        """
        if motor_names is None:
            motor_names = self.joint_names

        current_vals = np.array([self.last_d[name] for name in motor_names], dtype=np.float32)
        filtered_vals = np.zeros_like(current_vals)

        for i, name in enumerate(motor_names):
            # Retrieve the filter state for this joint
            x = self.kalman_state[name]["x"]
            P = self.kalman_state[name]["P"]
            Z = current_vals[i]

            # If this is the first reading, initialize
            if x is None or P is None:
                x = Z
                P = 1.0  # or some large initial uncertainty

            # 1) Predict step
            x_pred = x  # no velocity model, so x_pred = x
            P_pred = P + Q

            # 2) Update step
            K = P_pred / (P_pred + R)          # Kalman gain
            x_new = x_pred + K * (Z - x_pred)  # new state estimate
            P_new = (1 - K) * P_pred           # new covariance

            # Save back
            self.kalman_state[name]["x"] = x_new
            self.kalman_state[name]["P"] = P_new

            filtered_vals[i] = x_new

        if self.calibration is not None:
            filtered_vals = self.apply_calibration(filtered_vals, motor_names)

        return filtered_vals

    def async_read(self):
        """
        Continuously read from the serial buffer in its own thread,
        store into `self.last_d` and also append to the rolling buffer (joint_buffer).
        """
        while True:
            if self.serial.in_waiting > 0:
                self.serial.flush()
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")

                if len(vals) != 7:
                    continue
                try:
                    vals = [int(val) for val in vals]
                except ValueError:
                    self.serial.flush()
                    vals = self.serial.readline().decode("utf-8").strip()
                    vals = vals.split(" ")
                    vals = [int(val) for val in vals]
                d = {
                    "wrist_roll": vals[0],
                    "wrist_yaw": vals[1],
                    "wrist_pitch": vals[2],
                    "elbow_flex": vals[3],
                    "shoulder_roll": vals[4],
                    "shoulder_yaw": vals[5],
                    "shoulder_pitch": vals[6],
                }

                # Update the last_d dictionary
                self.last_d = d

                # Also push these new values into the rolling buffers
                for joint_name, joint_val in d.items():
                    self.joint_buffer[joint_name].append(joint_val)

            # Optional: short sleep to avoid busy-loop
            # time.sleep(0.001)

    def run_calibration(self, robot):
        robot.arm_bus.write("Acceleration", 50)
        n_joints = len(self.joint_names)

        max_open_all = np.zeros(n_joints, dtype=np.float32)
        min_open_all = np.zeros(n_joints, dtype=np.float32)
        max_closed_all = np.zeros(n_joints, dtype=np.float32)
        min_closed_all = np.zeros(n_joints, dtype=np.float32)

        for i, jname in enumerate(self.joint_names):
            print(f"\n--- Calibrating joint '{jname}' ---")

            joint_idx = robot.arm_calib_dict["motor_names"].index(jname)
            open_val = robot.arm_calib_dict["start_pos"][joint_idx]
            print(f"Commanding {jname} to OPEN position {open_val}...")
            robot.arm_bus.write("Goal_Position", [open_val], [jname])

            input("Physically verify or adjust the joint. Press Enter when ready to capture...")

            open_pos_list = []
            for _ in range(100):
                all_joints_vals = self.read()  # read entire arm
                open_pos_list.append(all_joints_vals[i])  # store only this joint
                time.sleep(0.01)

            # Convert to numpy and track min/max
            open_array = np.array(open_pos_list, dtype=np.float32)
            max_open_all[i] = open_array.max()
            min_open_all[i] = open_array.min()

            closed_val = robot.arm_calib_dict["end_pos"][joint_idx]
            if jname == "elbow_flex":
                closed_val = closed_val - 700
                closed_val = robot.arm_calib_dict["end_pos"][joint_idx]
            print(f"Commanding {jname} to CLOSED position {closed_val}...")
            robot.arm_bus.write("Goal_Position", [closed_val], [jname])

            input("Physically verify or adjust the joint. Press Enter when ready to capture...")

            closed_pos_list = []
            for _ in range(100):
                all_joints_vals = self.read()
                closed_pos_list.append(all_joints_vals[i])
                time.sleep(0.01)

            closed_array = np.array(closed_pos_list, dtype=np.float32)
            # Some thresholding for closed positions
            # closed_array[closed_array < 1000] = 60000

            max_closed_all[i] = closed_array.max()
            min_closed_all[i] = closed_array.min()

            robot.arm_bus.write("Goal_Position", [int((closed_val + open_val) / 2)], [jname])

        open_pos = np.maximum(max_open_all, max_closed_all)
        closed_pos = np.minimum(min_open_all, min_closed_all)

        for i, jname in enumerate(self.joint_names):
            if jname not in ["wrist_pitch", "shoulder_pitch"]:
                # Swap open/closed for these joints
                tmp_pos = open_pos[i]
                open_pos[i] = closed_pos[i]
                closed_pos[i] = tmp_pos

        # Debug prints
        print("\nFinal open/closed arrays after any swaps/inversions:")
        print(f"open_pos={open_pos}")
        print(f"closed_pos={closed_pos}")

        homing_offset = [0] * n_joints
        drive_mode = [0] * n_joints
        calib_modes = [CalibrationMode.LINEAR.name] * n_joints

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": open_pos,
            "end_pos": closed_pos,
            "calib_mode": calib_modes,
            "motor_names": self.joint_names,
        }
        file_path = "examples/hopejr/settings/arm_calib.pkl"

        if not os.path.exists(file_path):
            with open(file_path, "wb") as f:
                pickle.dump(calib_dict, f)
            print(f"Dictionary saved to {file_path}")

        self.set_calibration(calib_dict)

    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """
        Example calibration that linearly maps [start_pos, end_pos] to [0, 100].
        Extend or modify for your needs.
        """
        if motor_names is None:
            motor_names = self.joint_names

        values = values.astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to [0, 100]
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                # Check boundaries
                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    # If you want to handle out-of-range differently:
                    # raise JointOutOfRangeError(msg)
                    msg = (
                        f"Wrong motor position range detected for {name}. "
                        f"Value = {values[i]} %, expected within [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}]"
                    )
                    print(msg)

        return values


class HomonculusGlove:
    def __init__(self, serial_port: str = "/dev/ttyACM1", baud_rate: int = 115200):
        self.serial_port = serial_port
        self.baud_rate = baud_rate
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

        # Number of past values to keep in memory
        self.buffer_size = 10

        # Initialize a buffer (deque) for each joint
        self.joint_buffer = {
            "thumb_0": deque(maxlen=self.buffer_size),
            "thumb_1": deque(maxlen=self.buffer_size),
            "thumb_2": deque(maxlen=self.buffer_size),
            "thumb_3": deque(maxlen=self.buffer_size),
            "index_0": deque(maxlen=self.buffer_size),
            "index_1": deque(maxlen=self.buffer_size),
            "index_2": deque(maxlen=self.buffer_size),
            "middle_0": deque(maxlen=self.buffer_size),
            "middle_1": deque(maxlen=self.buffer_size),
            "middle_2": deque(maxlen=self.buffer_size),
            "ring_0": deque(maxlen=self.buffer_size),
            "ring_1": deque(maxlen=self.buffer_size),
            "ring_2": deque(maxlen=self.buffer_size),
            "pinky_0": deque(maxlen=self.buffer_size),
            "pinky_1": deque(maxlen=self.buffer_size),
            "pinky_2": deque(maxlen=self.buffer_size),
            "battery_voltage": deque(maxlen=self.buffer_size),
        }

        # Start the reading thread
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

        # Last read dictionary
        self.last_d = {
            "thumb_0": 100,
            "thumb_1": 100,
            "thumb_2": 100,
            "thumb_3": 100,
            "index_0": 100,
            "index_1": 100,
            "index_2": 100,
            "middle_0": 100,
            "middle_1": 100,
            "middle_2": 100,
            "ring_0": 100,
            "ring_1": 100,
            "ring_2": 100,
            "pinky_0": 100,
            "pinky_1": 100,
            "pinky_2": 100,
            "battery_voltage": 100,
        }
        self.calibration = None

    @property
    def joint_names(self):
        return list(self.last_d.keys())

    def read(self, motor_names: list[str] | None = None):
        """
        Return the most recent (single) values from self.last_d,
        optionally applying calibration.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Get raw (last) values
        values = np.array([self.last_d[k] for k in motor_names])
        print(values)

        # Apply calibration if available
        if self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            print(values)
        return values

    def read_running_average(self, motor_names: list[str] | None = None, linearize=False):
        """
        Return the AVERAGE of the most recent self.buffer_size (or fewer, if not enough data)
        readings for each joint, optionally applying calibration.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Gather averaged readings from buffers
        smoothed_vals = []
        for name in motor_names:
            buf = self.joint_buffer[name]
            if len(buf) == 0:
                # If no data has been read yet, fall back to last_d
                smoothed_vals.append(self.last_d[name])
            else:
                # Otherwise, average over the existing buffer
                smoothed_vals.append(np.mean(buf))

        smoothed_vals = np.array(smoothed_vals, dtype=np.float32)

        # Apply calibration if available
        if self.calibration is not None:
            smoothed_vals = self.apply_calibration(smoothed_vals, motor_names)

        return smoothed_vals

    def async_read(self):
        """
        Continuously read from the serial buffer in its own thread,
        store into `self.last_d` and also append to the rolling buffer (joint_buffer).
        """
        while True:
            if self.serial.in_waiting > 0:
                self.serial.flush()
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")
                if len(vals) != 17:
                    continue
                vals = [int(val) for val in vals]

                d = {
                    "thumb_0": vals[0],
                    "thumb_1": vals[1],
                    "thumb_2": vals[2],
                    "thumb_3": vals[3],
                    "index_0": vals[4],
                    "index_1": vals[5],
                    "index_2": vals[6],
                    "middle_0": vals[7],
                    "middle_1": vals[8],
                    "middle_2": vals[9],
                    "ring_0": vals[10],
                    "ring_1": vals[11],
                    "ring_2": vals[12],
                    "pinky_0": vals[13],
                    "pinky_1": vals[14],
                    "pinky_2": vals[15],
                    "battery_voltage": vals[16],
                }

                # Update the last_d dictionary
                self.last_d = d

                # Also push these new values into the rolling buffers
                for joint_name, joint_val in d.items():
                    self.joint_buffer[joint_name].append(joint_val)

    def run_calibration(self):
        print("\nMove hand to open position")
        input("Press Enter to continue...")
        open_pos_list = []
        for _ in range(100):
            open_pos = self.read()
            open_pos_list.append(open_pos)
            time.sleep(0.01)
        open_pos = np.array(open_pos_list)
        max_open_pos = open_pos.max(axis=0)
        min_open_pos = open_pos.min(axis=0)

        print(f"{max_open_pos=}")
        print(f"{min_open_pos=}")

        print("\nMove hand to closed position")
        input("Press Enter to continue...")
        closed_pos_list = []
        for _ in range(100):
            closed_pos = self.read()
            closed_pos_list.append(closed_pos)
            time.sleep(0.01)
        closed_pos = np.array(closed_pos_list)
        max_closed_pos = closed_pos.max(axis=0)
        closed_pos[closed_pos < 1000] = 60000
        min_closed_pos = closed_pos.min(axis=0)

        print(f"{max_closed_pos=}")
        print(f"{min_closed_pos=}")

        open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
        closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)

        # INVERSION: swap open/closed for joints whose sensors read backwards
        for i, jname in enumerate(self.joint_names):
            if jname in [
                "thumb_0",
                "thumb_3",
                "index_2",
                "middle_2",
                "ring_2",
                "pinky_2",
                "index_0",
            ]:
                tmp_pos = open_pos[i]
                open_pos[i] = closed_pos[i]
                closed_pos[i] = tmp_pos

        print()
        print(f"{open_pos=}")
        print(f"{closed_pos=}")

        homing_offset = [0] * len(self.joint_names)
        drive_mode = [0] * len(self.joint_names)
        calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": open_pos,
            "end_pos": closed_pos,
            "calib_mode": calib_modes,
            "motor_names": self.joint_names,
        }

        file_path = "examples/hopejr/settings/hand_calib.pkl"

        if not os.path.exists(file_path):
            with open(file_path, "wb") as f:
                pickle.dump(calib_dict, f)
            print(f"Dictionary saved to {file_path}")

        self.set_calibration(calib_dict)

    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Convert from the unsigned int32 joint position range [0, 2**32[ to the universal
        float32 nominal degree range ]-180.0, 180.0[ with a "zero position" at 0 degrees.

        Note: We say "nominal degree range" since the motors can take values outside this
        range, for instance 190 degrees if the motor rotates more than half a turn from the
        zero position. However, most motors can't rotate more than 180 degrees and will stay
        in this range.

        Joint values are originally in [0, 2**32[ (unsigned int32). Each motor is expected to
        complete a full rotation when given a goal position that is + or - its resolution.
        For instance, Feetech XL330-M077 motors have a resolution of 4096, and at any position
        in their original range, say 56734, they complete a full rotation clockwise by moving
        to 60830, or anticlockwise by moving to 52638. The position in the original range is
        arbitrary and might differ a lot between motors. To harmonize between motors of the
        same model, different robots, or even models of different brands, we propose to work
        in the centered nominal degree range ]-180, 180[.
        """
        if motor_names is None:
            motor_names = self.joint_names

        # Convert from the original unsigned range to signed float32
        values = values.astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to a nominal range [0, 100] %,
                # useful for joints with linear motions like the Aloha gripper
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
                        values[i] = end_pos
                    else:
                        msg = (
                            f"Wrong motor position range detected for {name}. "
                            f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
                            f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
                            f"but present value is {values[i]} %. "
                            "This might be due to a cable connection issue creating an artificial jump in motor values. "
                            "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                        )
                        print(msg)
                        # raise JointOutOfRangeError(msg)

        return values

    # def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
    #     """Inverse of `apply_calibration`."""
    #     if motor_names is None:
    #         motor_names = self.joint_names
    #
    #     for i, name in enumerate(motor_names):
    #         calib_idx = self.calibration["motor_names"].index(name)
    #         calib_mode = self.calibration["calib_mode"][calib_idx]
    #
    #         if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
    #             start_pos = self.calibration["start_pos"][calib_idx]
    #             end_pos = self.calibration["end_pos"][calib_idx]
    #
    #             # Convert from nominal linear range of [0, 100] % to
    #             # actual motor range of values which can be arbitrary.
    #             values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
    #
    #     values = np.round(values).astype(np.int32)
    #     return values


class EncoderReader:
    def __init__(self, serial_port="/dev/ttyUSB1", baud_rate=115200):
        self.serial_port = serial_port
        self.baud_rate = baud_rate
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

        # Start a background thread to continuously read from the serial port
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

        # Store the latest encoder reading in this dictionary
        self.last_d = {"encoder": 500}

    def async_read(self):
        while True:
            # Read one line from serial
            line = self.serial.readline().decode("utf-8").strip()
            if line:
                try:
                    val = int(line)  # Parse the incoming line as an integer
                    self.last_d["encoder"] = val
                except ValueError:
                    # If we couldn't parse it as an integer, just skip
                    pass

    def read(self):
        """Return the last encoder value that was read."""
        return self.last_d["encoder"]


class Tac_Man:
    def __init__(self, serial_port="/dev/ttyUSB1", baud_rate=115200):
        self.serial_port = serial_port
        self.baud_rate = baud_rate
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

        # Start a background thread to continuously read from the serial port
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

        # Store the latest readings in this list
        self.last_d = [0, 0, 0]  # Default values for three readings

    def async_read(self):
        while True:
            # Read one line from serial
            line = self.serial.readline().decode("utf-8").strip()
            if line:
                try:
                    # Parse the incoming line as three comma-separated integers
                    values = [int(val) for val in line.split(",")]
                    if len(values) == 3:  # Ensure we have exactly three values
                        self.last_d = values
                except ValueError:
                    # If parsing fails, skip this line
                    pass

    def read(self):
        """Return the last readings as a list of three integers."""
        return self.last_d
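
For context, a sketch of how the reader classes above might be driven; the serial port is an assumption, and the loop rate is illustrative:

# Hypothetical read loop: Kalman-smoothed exoskeleton joints at ~100 Hz.
arm = HomonculusArm(serial_port="/dev/ttyACM1")
time.sleep(1.0)  # let the background thread fill the buffers
for _ in range(100):
    joints = arm.read_kalman_filter(Q=1.0, R=100.0)
    time.sleep(0.01)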
@@ -1,111 +0,0 @@
test and test4
installed serial and opencv
after pip install -e .
pip install -e ".[feetech]"

robot.hand_bus.read("Present_Position")
array([ 349,  799, 1000, 1004,  508,  503,  673,  608,  791,  390,  552,
        506,  600,  565,  428,  379], dtype=int32)

robot.hand_bus.write("Goal_Position", [349, 799, 500, 500, 508, 503, 673, 608, 791, 390, 552, 506, 600, 565, 428, 379])

robot.arm_bus.write("Goal_Position", [1825, 2045, 2010, 2035, 1414, 1800, 1615])
robot.arm_bus.read("Present_Position")

robot.arm_bus.write("Goal_Position", [1500], ["elbow_flex"])
robot.arm_bus.write("Goal_Position", [2000], ["wrist_yaw"])

ranges: [600-2300, 1500-2300, 1300-2800, 1000-2500, 600-2800, 400-1700, 1300-2300]
shoulder_up,
shoulder_forward,
shoulder_yaw,
elbow_flex,
wrist_yaw,
wrist_pitch,
wrist_roll

COM18

C:/Users/Lenovo/AppData/Local/Programs/Python/Python310/python.exe c:/Users/Lenovo/Documents/HuggingFace/lerobot/examples/test4.py

wrist pitch is broken

so the wrist motor was broken, and we didn't know which one it was because
if the chain has an issue we don't know how to locate which motor is at fault (cables are hard to remove)

to calibrate:

python lerobot/scripts/configure_motor.py \
--port /dev/ttyACM1 \
--brand feetech \
--model sts3250 \
--baudrate 1000000 \
--ID 2

python lerobot/scripts/configure_motor.py \
--port /dev/ttyACM0 \
--brand feetech \
--model sm8512bl \
--baudrate 115200 \
--ID 1

python lerobot/scripts/configure_motor.py \
--port /dev/ttyACM1 \
--brand feetech \
--model scs0009 \
--baudrate 1000000 \
--ID 30

why are the motors beeping?

# interpolate between start and end pos
robot.arm_bus.write("Goal_Position", [int(i*interp + j*(1-interp)) for i, j in zip(arm_calibration["start_pos"], arm_calibration["end_pos"])])

Ctrl+Shift+M to look for stuff

set calibration is useless

move the joints to that position too

/home/nepyope/Desktop/HuggingFace/lerobot/lerobot/common/robot_devices/motors/feetech.py

there's clearly some lag, and it's probably because of an out-of-range issue

# hand_calibration = robot.get_hand_calibration()
# joint = input("Enter joint name: ")
# j1 = f"{joint}_pinky_side"
# j2 = f"{joint}_thumb_side"
# encoder = EncoderReader("/dev/ttyUSB0", 115200)
# start_angle1 = hand_calibration['start_pos'][hand_calibration['motor_names'].index(j1)]
# end_angle1 = hand_calibration['end_pos'][hand_calibration['motor_names'].index(j1)]
# start_angle2 = hand_calibration['start_pos'][hand_calibration['motor_names'].index(j2)]
# end_angle2 = hand_calibration['end_pos'][hand_calibration['motor_names'].index(j2)]
# # start_angle = shoulder_calibration['start_pos'][shoulder_calibration['motor_names'].index(joint)]
# # end_angle = shoulder_calibration['end_pos'][shoulder_calibration['motor_names'].index(joint)]
# while True:
#     angle1 = int(start_angle1 + (end_angle1 - start_angle1) * encoder.read() / 1000)
#     angle2 = int(start_angle2 + (end_angle2 - start_angle2) * encoder.read() / 1000)

#     robot.hand_bus.write("Goal_Position", angle1, [j1])
#     robot.hand_bus.write("Goal_Position", angle2, [j2])
#     print(angle1, angle2)
#     time.sleep(0.1)

# print(robot.hand_bus.find_motor_indices())
# exit()

maybe divide the 3.3 V by 2 and use that as a reference

https://jlcpcb.com/partdetail/23831236-OPA340UA_UMW/C22365307

-90 is good for the op amp
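
The interpolation one-liner in the notes above generalizes to a full sweep; a sketch, assuming a connected `robot` instance from the follower module and its calibration dict:

# Hypothetical sweep from start_pos to end_pos in 50 steps.
import time
calib = robot.get_arm_calibration()
for step in range(51):
    interp = step / 50
    goal = [int(s * (1 - interp) + e * interp)
            for s, e in zip(calib["start_pos"], calib["end_pos"])]
    robot.arm_bus.write("Goal_Position", goal)
    time.sleep(0.05)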
@@ -1,52 +0,0 @@
#include <Arduino.h>

// Define multiplexer select pins
#define S0 5
#define S1 6
#define S2 8
#define S3 7
#define SENSOR_INPUT 4

#define SENSOR_COUNT 16

int rawVals[SENSOR_COUNT];

void measureRawValues() {
  for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
    // S0-S2 are wired inverted; S3 is not.
    digitalWrite(S0, (i & 0b1) ^ 0b1);
    digitalWrite(S1, (i >> 1 & 0b1) ^ 0b1);
    digitalWrite(S2, (i >> 2 & 0b1) ^ 0b1);
    digitalWrite(S3, i >> 3 & 0b1);
    delay(1);

    rawVals[i] = analogRead(SENSOR_INPUT);
  }
}

void printRawValues() {
  for (uint8_t i = 0; i < SENSOR_COUNT; i++) {
    Serial.print(rawVals[i]);
    if (i < SENSOR_COUNT - 1) Serial.print(" ");
  }
  Serial.println();
}

void setup() {
  Serial.begin(115200);

  pinMode(S0, OUTPUT);
  pinMode(S1, OUTPUT);
  pinMode(S2, OUTPUT);
  pinMode(S3, OUTPUT);

  digitalWrite(S0, LOW);
  digitalWrite(S1, LOW);
  digitalWrite(S2, LOW);
  digitalWrite(S3, LOW);
}

void loop() {
  measureRawValues();
  printRawValues();
  delay(1);
}
@@ -1,74 +0,0 @@
import numpy as np
from PIL import Image, ImageSequence


def coalesce_gif(im):
    """
    Attempt to coalesce frames so each one is a full image.
    This handles many (though not all) partial-frame GIFs.
    """
    # Convert mode to RGBA
    im = im.convert("RGBA")

    # Prepare an accumulator the same size as the base frame
    base = Image.new("RGBA", im.size)
    frames = []

    # Go through each frame
    for frame in ImageSequence.Iterator(im):
        base.alpha_composite(frame.convert("RGBA"))
        frames.append(base.copy())
    return frames


def remove_white_make_black(arr, threshold=250):
    """
    For each pixel in arr (H,W,3), if R,G,B >= threshold, set to black (0,0,0).
    This effectively 'removes' white so it won't affect the sum.
    """
    mask = (arr[..., 0] >= threshold) & \
           (arr[..., 1] >= threshold) & \
           (arr[..., 2] >= threshold)
    arr[mask] = 0  # set to black


def main():
    # Load the animated GIF
    gif = Image.open("input.gif")

    # Coalesce frames so each is full-size
    frames = coalesce_gif(gif)
    if not frames:
        print("No frames found!")
        return

    # Convert first frame to RGB array, initialize sum array
    w, h = frames[0].size
    sum_array = np.zeros((h, w, 3), dtype=np.uint16)  # 16-bit to avoid overflow

    # For each frame:
    for f in frames:
        # Convert to RGB
        rgb = f.convert("RGB")
        arr = np.array(rgb, dtype=np.uint16)  # shape (H, W, 3)

        # Remove near-white by setting it to black
        remove_white_make_black(arr, threshold=250)

        # Add to sum_array, then clamp to 255
        sum_array += arr
        np.clip(sum_array, 0, 255, out=sum_array)

    # Convert sum_array back to 8-bit
    sum_array = sum_array.astype(np.uint8)

    # Finally, any pixel that stayed black is presumably "empty," so we set it to white
    black_mask = (sum_array[..., 0] == 0) & \
                 (sum_array[..., 1] == 0) & \
                 (sum_array[..., 2] == 0)
    sum_array[black_mask] = [255, 255, 255]

    # Create final Pillow image
    final_img = Image.fromarray(sum_array, mode="RGB")
    final_img.save("result.png")
    print("Done! Wrote result.png.")


if __name__ == "__main__":
    main()
@@ -1,31 +0,0 @@
import time
import serial
import numpy as np
import matplotlib.pyplot as plt

# Import the motor bus (adjust the import path as needed)
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus


def main():
    bus = FeetechMotorsBus(
        port="/dev/ttyACM0",
        motors={
            "leader": [1, "scs0009"],
            "follower": [2, "scs0009"],
        },
        protocol_version=1,
        group_sync_read=False,
    )
    bus.connect()
    print(bus.read("Present_Position", "leader"))
    # Leader is passive (torque off); follower mirrors it (torque on).
    bus.write("Torque_Enable", 0, ["leader"])
    bus.write("Torque_Enable", 1, ["follower"])
    for i in range(10000000):
        time.sleep(0.01)
        pos = bus.read("Present_Position", "leader")
        if pos[0] > 1 and pos[0] < 1022:
            bus.write("Goal_Position", pos, ["follower"])


if __name__ == "__main__":
    main()
@@ -1,15 +0,0 @@
robot:
  arm_bus:
    Lock: 0
    Torque_Limit: 1000
    Protection_Current: 500
    Over_Current_Protection_Time: 10
    Max_Torque_Limit: 1000
    Overload_Torque: 40  # Play around with this
    Protection_Time: 1000  # When does it kick in?
    Protective_Torque: 1
    Maximum_Acceleration: 100
    Torque_Enable: 1
    Acceleration: 30
  hand_bus:
    Acceleration: 100
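
This file pairs with `HopeJuniorRobot.apply_arm_config` from the follower module above, which writes each key in order to the arm bus. A sketch of the call, using the path these files reference elsewhere:

# Hypothetical: push the YAML settings to the arm bus after connecting.
robot = HopeJuniorRobot()
robot.connect_arm()
robot.apply_arm_config("examples/hopejr/settings/config.yaml")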
@@ -1,61 +0,0 @@
|
||||
import time

from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus


def main():
    # Instantiate the bus for a single motor on port /dev/ttyACM0.
    arm_bus = FeetechMotorsBus(
        port="/dev/ttyACM0",
        motors={"wrist_pitch": [1, "scs0009"]},
        protocol_version=1,
        group_sync_read=False,  # using individual read calls
    )
    arm_bus.connect()

    # Configure continuous rotation mode.
    arm_bus.write("Min_Angle_Limit", 0)
    arm_bus.write("Max_Angle_Limit", 1024)

    # For model "scs0009", the raw reading runs from 0 to ~1022.
    resolution_max = 1022  # use 1022 as the effective maximum raw value

    # Read the initial raw motor position.
    prev_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]
    print("Initial raw position:", prev_raw)

    # Command continuous rotation.
    arm_bus.write("Goal_Position", 1024)

    # Initialize the loop counter.
    loops_count = 0
    target_effective = 1780
    tolerance = 50  # stop when the effective position is within ±50 of the target

    while True:
        current_raw = arm_bus.read("Present_Position", ["wrist_pitch"])[0]

        # Detect wrap-around: if the previous reading was near the top (>= 1020)
        # and the current reading is low (< 100), count that as one full loop.
        if prev_raw >= 1020 and current_raw < 100:
            loops_count += 1
            print(f"Wrap detected! loops_count increased to {loops_count}")

        # Compute the effective (unwrapped) position.
        effective_position = loops_count * resolution_max + current_raw
        print(f"Raw position: {current_raw} | loops_count: {loops_count} | Effective position: {effective_position}")

        # Check if the effective position is within tolerance of the target.
        if abs(effective_position - target_effective) <= tolerance:
            # Command the motor to stop by setting the current raw position as the goal.
            arm_bus.write("Goal_Position", current_raw)
            print(f"Target reached (effective position: {effective_position}). Stopping motor at raw position {current_raw}.")
            break

        prev_raw = current_raw
        time.sleep(0.01)  # 10 ms delay

    time.sleep(1)
    arm_bus.disconnect()


if __name__ == "__main__":
    main()
|
||||
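The wrap-around bookkeeping above can be isolated into a small helper; a minimal sketch with simulated readings (no hardware required), assuming the same 0-1022 raw range:

resolution_max = 1022

def unwrap(prev_raw, current_raw, loops_count):
    # Count one full loop when the reading jumps from near the top to near the bottom.
    if prev_raw >= 1020 and current_raw < 100:
        loops_count += 1
    return loops_count * resolution_max + current_raw, loops_count

loops, prev = 0, 1015
for raw in [1018, 1021, 3, 40, 200]:  # simulated readings crossing the wrap point
    effective, loops = unwrap(prev, raw, loops)
    prev = raw
print(effective, loops)  # 1222, 1 (one wrap: 1 * 1022 + 200)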
@@ -1,226 +0,0 @@
|
||||
from follower import HopeJuniorRobot
|
||||
from leader import (
|
||||
HomonculusArm,
|
||||
HomonculusGlove,
|
||||
EncoderReader
|
||||
)
|
||||
from visualizer import value_to_color
|
||||
|
||||
import time
|
||||
import numpy as np
|
||||
|
||||
import pickle
|
||||
import pygame
|
||||
import typer
|
||||
|
||||
def main(
    calibrate_glove: bool = typer.Option(False, "--calibrate-glove", help="Calibrate the glove"),
    calibrate_exoskeleton: bool = typer.Option(False, "--calibrate-exoskeleton", help="Calibrate the exoskeleton"),
    freeze_fingers: bool = typer.Option(False, "--freeze-fingers", help="Freeze the fingers"),
    freeze_arm: bool = typer.Option(False, "--freeze-arm", help="Freeze the arm"),
    show_loads: bool = typer.Option(False, "--show-loads", help="Show the loads in a GUI"),
):
    robot = HopeJuniorRobot()
|
||||
|
||||
|
||||
robot.connect_hand()
|
||||
robot.connect_arm()
|
||||
#read pos
|
||||
print(robot.hand_bus.read("Present_Position"))
|
||||
print(robot.arm_bus.read("Present_Position", "shoulder_pitch"))
|
||||
print(robot.arm_bus.read("Present_Position",["shoulder_yaw","shoulder_roll","elbow_flex","wrist_roll","wrist_yaw","wrist_pitch"]))
|
||||
#robot.arm_bus.write("Goal_Position", robot.arm_calib_dict["start_pos"][0]*1 +robot.arm_calib_dict["end_pos"][0]*0, ["wrist_roll"])
|
||||
for i in range(10):
|
||||
time.sleep(0.1)
|
||||
robot.apply_arm_config('examples/hopejr/settings/config.yaml')
|
||||
|
||||
# #calibrate arm
|
||||
arm_calibration = robot.get_arm_calibration()
|
||||
exoskeleton = HomonculusArm(serial_port="/dev/tty.usbmodem1201")
|
||||
|
||||
|
||||
if calibrate_exoskeleton:
|
||||
exoskeleton.run_calibration(robot)
|
||||
|
||||
file_path = "examples/hopejr/settings/arm_calib.pkl"
|
||||
with open(file_path, "rb") as f:
|
||||
calib_dict = pickle.load(f)
|
||||
print("Loaded dictionary:", calib_dict)
|
||||
exoskeleton.set_calibration(calib_dict)
|
||||
|
||||
#calibrate hand
|
||||
hand_calibration = robot.get_hand_calibration()
|
||||
glove = HomonculusGlove(serial_port = "/dev/tty.usbmodem1101")
|
||||
|
||||
if calibrate_glove:
|
||||
glove.run_calibration()
|
||||
|
||||
file_path = "examples/hopejr/settings/hand_calib.pkl"
|
||||
with open(file_path, "rb") as f:
|
||||
calib_dict = pickle.load(f)
|
||||
print("Loaded dictionary:", calib_dict)
|
||||
glove.set_calibration(calib_dict)
|
||||
|
||||
robot.hand_bus.set_calibration(hand_calibration)
|
||||
robot.arm_bus.set_calibration(arm_calibration)
|
||||
|
||||
# Initialize Pygame
|
||||
# pygame.init()
|
||||
|
||||
# # Set up the display
|
||||
# screen = pygame.display.set_mode((800, 600))
|
||||
|
||||
# pygame.display.set_caption("Robot Hand Visualization")
|
||||
|
||||
|
||||
# # Create hand structure with 16 squares and initial values
|
||||
# hand_components = []
|
||||
|
||||
# # Add thumb (4 squares in diamond shape)
|
||||
# thumb_positions = [
|
||||
# (150, 300), (125, 350),
|
||||
# (175, 350), (150, 400)
|
||||
# ]
|
||||
# for pos in thumb_positions:
|
||||
# hand_components.append({"pos": pos, "value": 0})
|
||||
|
||||
# # Add fingers (4 fingers with 3 squares each in vertical lines)
|
||||
# finger_positions = [
|
||||
# (200, 100), # Index
|
||||
# (250, 100), # Middle
|
||||
# (300, 100), # Ring
|
||||
# (350, 100) # Pinky
|
||||
# ]
|
||||
|
||||
# for x, y in finger_positions:
|
||||
# for i in range(3):
|
||||
# hand_components.append({"pos": (x, y + i * 50), "value": 0})
|
||||
|
||||
for i in range(1000000000000000):
|
||||
robot.apply_arm_config('examples/hopejr/settings/config.yaml')
|
||||
#robot.arm_bus.write("Acceleration", 50, "shoulder_yaw")
|
||||
joint_names = ["shoulder_pitch", "shoulder_yaw", "shoulder_roll", "elbow_flex", "wrist_roll", "wrist_yaw", "wrist_pitch"]
|
||||
# to test a single joint only, e.g.:
#joint_names = ["shoulder_pitch"]
|
||||
joint_values = exoskeleton.read(motor_names=joint_names)
|
||||
|
||||
#joint_values = joint_values.round().astype(int)
|
||||
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
|
||||
|
||||
motor_values = []
|
||||
motor_names = []
|
||||
motor_names += ["shoulder_pitch", "shoulder_yaw", "shoulder_roll", "elbow_flex", "wrist_roll", "wrist_yaw", "wrist_pitch"]
|
||||
#motor_names += ["shoulder_pitch"]
|
||||
motor_values += [joint_dict[name] for name in motor_names]
|
||||
#remove 50 from shoulder_roll
|
||||
#motor_values += [joint_dict[name] for name in motor_names]
|
||||
|
||||
motor_values = np.array(motor_values)
|
||||
motor_values = np.clip(motor_values, 0, 100)
|
||||
|
||||
print(motor_names, motor_values)
|
||||
if not freeze_arm:
|
||||
robot.arm_bus.write("Goal_Position", motor_values, motor_names)
|
||||
|
||||
if not freeze_fingers:  # include hand
|
||||
hand_joint_names = []
|
||||
hand_joint_names += ["thumb_3", "thumb_2", "thumb_1", "thumb_0"]#, "thumb_3"]
|
||||
hand_joint_names += ["index_0", "index_1", "index_2"]
|
||||
hand_joint_names += ["middle_0", "middle_1", "middle_2"]
|
||||
hand_joint_names += ["ring_0", "ring_1", "ring_2"]
|
||||
hand_joint_names += ["pinky_0", "pinky_1", "pinky_2"]
|
||||
hand_joint_values = glove.read(hand_joint_names)
|
||||
hand_joint_values = hand_joint_values.round().astype(int)
|
||||
hand_joint_dict = {k: v for k, v in zip(hand_joint_names, hand_joint_values, strict=False)}
|
||||
|
||||
hand_motor_values = []
|
||||
hand_motor_names = []
|
||||
|
||||
# Thumb
|
||||
hand_motor_names += ["thumb_basel_rotation", "thumb_mcp", "thumb_pip", "thumb_dip"]#, "thumb_MCP"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["thumb_3"],
|
||||
hand_joint_dict["thumb_2"],
|
||||
hand_joint_dict["thumb_1"],
|
||||
hand_joint_dict["thumb_0"]
|
||||
]
|
||||
|
||||
# # Index finger
|
||||
index_splay = 0.1
|
||||
hand_motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["index_2"],
|
||||
(100 - hand_joint_dict["index_0"]) * index_splay + hand_joint_dict["index_1"] * (1 - index_splay),
|
||||
hand_joint_dict["index_0"] * index_splay + hand_joint_dict["index_1"] * (1 - index_splay),
|
||||
]
|
||||
|
||||
# Middle finger
|
||||
middle_splay = 0.1
|
||||
hand_motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["middle_2"],
|
||||
hand_joint_dict["middle_0"] * middle_splay + hand_joint_dict["middle_1"] * (1 - middle_splay),
|
||||
(100 - hand_joint_dict["middle_0"]) * middle_splay + hand_joint_dict["middle_1"] * (1 - middle_splay),
|
||||
]
|
||||
|
||||
# # Ring finger
|
||||
ring_splay = 0.1
|
||||
hand_motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["ring_2"],
|
||||
(100 - hand_joint_dict["ring_0"]) * ring_splay + hand_joint_dict["ring_1"] * (1 - ring_splay),
|
||||
hand_joint_dict["ring_0"] * ring_splay + hand_joint_dict["ring_1"] * (1 - ring_splay),
|
||||
]
|
||||
|
||||
# # Pinky finger
|
||||
pinky_splay = -0.1
|
||||
hand_motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
hand_motor_values += [
|
||||
hand_joint_dict["pinky_2"],
|
||||
hand_joint_dict["pinky_0"] * pinky_splay + hand_joint_dict["pinky_1"] * (1 - pinky_splay),
|
||||
(100 - hand_joint_dict["pinky_0"]) * pinky_splay + hand_joint_dict["pinky_1"] * (1 - pinky_splay),
|
||||
]
|
||||
|
||||
hand_motor_values = np.array(hand_motor_values)
|
||||
hand_motor_values = np.clip(hand_motor_values, 0, 100)
|
||||
robot.hand_bus.write("Acceleration", 255, hand_motor_names)
|
||||
robot.hand_bus.write("Goal_Position", hand_motor_values, hand_motor_names)
|
||||
|
||||
# if i%20==0 and i > 100:
|
||||
# try:
|
||||
# loads = robot.hand_bus.read("Present_Load")
|
||||
# for i, comp in enumerate(hand_components):
|
||||
# # Wave oscillates between 0 and 2024:
|
||||
# # Center (1012) +/- 1012 * sin(...)
|
||||
# comp["value"] = loads[i]
|
||||
# except:
|
||||
# pass
|
||||
|
||||
|
||||
time.sleep(0.01)
|
||||
|
||||
# for event in pygame.event.get():
|
||||
# if event.type == pygame.QUIT:
|
||||
# robot.hand_bus.disconnect()
|
||||
# robot.arm_bus.disconnect()
|
||||
# exit()
|
||||
# # Check for user pressing 'q' to quit
|
||||
# if event.type == pygame.KEYDOWN:
|
||||
# if event.key == pygame.K_q:
|
||||
# robot.hand_bus.disconnect()
|
||||
# robot.arm_bus.disconnect()
|
||||
# exit()
|
||||
|
||||
# # Draw background
|
||||
# screen.fill((0, 0, 0)) # Black background
|
||||
|
||||
# # Draw hand components
|
||||
# for comp in hand_components:
|
||||
# x, y = comp["pos"]
|
||||
# color = value_to_color(comp["value"])
|
||||
# pygame.draw.rect(screen, color, (x, y, 30, 30))
|
||||
|
||||
# pygame.display.flip()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
typer.run(main)
|
||||
@@ -1,135 +0,0 @@
|
||||
import serial
|
||||
import threading
|
||||
import time
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
# Thread function to read from a serial port continuously until stop_event is set.
|
||||
def read_serial(port, baudrate, stop_event, data_list):
|
||||
try:
|
||||
ser = serial.Serial(port, baudrate, timeout=1)
|
||||
except Exception as e:
|
||||
print(f"Error opening {port}: {e}")
|
||||
return
|
||||
|
||||
while not stop_event.is_set():
|
||||
try:
|
||||
line = ser.readline().decode('utf-8').strip()
|
||||
except Exception as e:
|
||||
print(f"Decode error on {port}: {e}")
|
||||
continue
|
||||
|
||||
if line:
|
||||
try:
|
||||
# Split the line into integer values.
|
||||
values = [int(x) for x in line.split()]
|
||||
# Keep only the first 16 values (ACM1 appends an extra one).
|
||||
if len(values) >= 16:
|
||||
if len(values) > 16:
|
||||
values = values[:16]
|
||||
# Save the timestamp (relative to start) with the sensor readings.
|
||||
timestamp = time.time()
|
||||
data_list.append((timestamp, values))
|
||||
except Exception as e:
|
||||
print(f"Error parsing line from {port}: '{line}' -> {e}")
|
||||
ser.close()
|
||||
|
||||
def main():
|
||||
# --- Configuration ---
|
||||
# Set your serial port names here (adjust for your system)
|
||||
acm0_port = "/dev/ttyACM0" # Example for Linux (or "COM3" on Windows)
|
||||
acm1_port = "/dev/ttyACM1" # Example for Linux (or "COM4" on Windows)
|
||||
baudrate = 115200
|
||||
|
||||
# Data storage for each device:
|
||||
data_acm0 = [] # Will hold tuples of (timestamp, [16 sensor values])
|
||||
data_acm1 = []
|
||||
|
||||
# Event to signal threads to stop reading.
|
||||
stop_event = threading.Event()
|
||||
|
||||
# Create and start reader threads.
|
||||
thread_acm0 = threading.Thread(target=read_serial, args=(acm0_port, baudrate, stop_event, data_acm0))
|
||||
thread_acm1 = threading.Thread(target=read_serial, args=(acm1_port, baudrate, stop_event, data_acm1))
|
||||
thread_acm0.start()
|
||||
thread_acm1.start()
|
||||
|
||||
# Record data for 10 seconds.
|
||||
record_duration = 10 # seconds
|
||||
start_time = time.time()
|
||||
time.sleep(record_duration)
|
||||
stop_event.set() # signal threads to stop
|
||||
|
||||
# Wait for both threads to finish.
|
||||
thread_acm0.join()
|
||||
thread_acm1.join()
|
||||
print("Finished recording.")
|
||||
|
||||
# --- Process the Data ---
|
||||
# Convert lists of (timestamp, values) to numpy arrays.
|
||||
# Compute time relative to the start of the recording.
|
||||
times_acm0 = np.array([t - start_time for t, _ in data_acm0])
|
||||
sensor_acm0 = np.array([vals for _, vals in data_acm0]) # shape (N0, 16)
|
||||
|
||||
times_acm1 = np.array([t - start_time for t, _ in data_acm1])
|
||||
sensor_acm1 = np.array([vals for _, vals in data_acm1]) # shape (N1, 16)
|
||||
|
||||
# --- Plot 1: Overlapping Time Series ---
|
||||
plt.figure(figsize=(12, 8))
|
||||
# Plot each sensor from ACM0 in red.
|
||||
for i in range(16):
|
||||
plt.plot(times_acm0, sensor_acm0[:, i], color='red', alpha=0.7,
         label='ACM0 sensors' if i == 0 else None)
|
||||
# Plot each sensor from ACM1 in blue.
|
||||
for i in range(16):
|
||||
plt.plot(times_acm1, sensor_acm1[:, i], color='blue', alpha=0.7,
         label='ACM1 sensors' if i == 0 else None)
|
||||
plt.xlabel("Time (s)")
|
||||
plt.ylabel("Sensor Reading")
|
||||
plt.title("Overlapping Sensor Readings (ACM0 in Red, ACM1 in Blue)")
|
||||
plt.legend()
|
||||
plt.tight_layout()
|
||||
plt.savefig("overlapping_sensor_readings.png", dpi=300)
|
||||
plt.close()
|
||||
print("Saved overlapping_sensor_readings.png")
|
||||
|
||||
# --- Plot 2: Variance of Noise for Each Sensor ---
|
||||
# Compute variance (over time) for each sensor channel.
|
||||
variance_acm0 = np.var(sensor_acm0, axis=0)
|
||||
variance_acm1 = np.var(sensor_acm1, axis=0)
|
||||
sensor_numbers = np.arange(1, 17)
|
||||
bar_width = 0.35
|
||||
|
||||
plt.figure(figsize=(12, 6))
|
||||
plt.bar(sensor_numbers - bar_width/2, variance_acm0, bar_width, color='red', label='ACM0')
|
||||
plt.bar(sensor_numbers + bar_width/2, variance_acm1, bar_width, color='blue', label='ACM1')
|
||||
plt.xlabel("Sensor Number")
|
||||
plt.ylabel("Variance")
|
||||
plt.title("Noise Variance per Sensor")
|
||||
plt.xticks(sensor_numbers)
|
||||
plt.legend()
|
||||
plt.tight_layout()
|
||||
plt.savefig("sensor_variance.png", dpi=300)
|
||||
plt.close()
|
||||
print("Saved sensor_variance.png")
|
||||
|
||||
# --- Plot 3: Difference Between ACM0 and ACM1 Readings ---
|
||||
# Since the two devices may not sample at exactly the same time,
|
||||
# we interpolate ACM1's data onto ACM0's time base for each sensor.
|
||||
plt.figure(figsize=(12, 8))
|
||||
for i in range(16):
|
||||
if len(times_acm1) > 1 and len(times_acm0) > 1:
|
||||
interp_acm1 = np.interp(times_acm0, times_acm1, sensor_acm1[:, i])
|
||||
diff = sensor_acm0[:, i] - interp_acm1
|
||||
plt.plot(times_acm0, diff, label=f"Sensor {i+1}")
|
||||
plt.xlabel("Time (s)")
|
||||
plt.ylabel("Difference (ACM0 - ACM1)")
|
||||
plt.title("Difference in Sensor Readings")
|
||||
plt.legend(fontsize='small', ncol=2)
|
||||
plt.tight_layout()
|
||||
plt.savefig("sensor_differences.png", dpi=300)
|
||||
plt.close()
|
||||
print("Saved sensor_differences.png")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
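The alignment trick used for Plot 3 is worth seeing in isolation; a minimal example with made-up timestamps:

import numpy as np

times_a = np.array([0.00, 0.10, 0.20, 0.30])  # device A sample times
values_a = np.array([10.0, 12.0, 11.0, 13.0])
times_b = np.array([0.05, 0.15, 0.25])        # device B samples at different times
values_b = np.array([9.0, 11.0, 12.0])

# Resample B onto A's time base, then compare sample by sample.
b_on_a = np.interp(times_a, times_b, values_b)
diff = values_a - b_on_a
print(b_on_a)  # [ 9.  10.  11.5 12. ] (values outside B's range are clamped to its endpoints)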
|
Before Width: | Height: | Size: 1.5 MiB |
|
Before Width: | Height: | Size: 1.3 MiB |
|
Before Width: | Height: | Size: 87 KiB |
@@ -1,84 +0,0 @@
|
||||
import time
|
||||
import serial
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
# Import the motor bus (adjust the import path as needed)
|
||||
from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus
|
||||
|
||||
def main():
|
||||
# -------------------------------
|
||||
# Setup the motor bus (ACM0)
|
||||
# -------------------------------
|
||||
arm_bus = FeetechMotorsBus(
|
||||
port="/dev/ttyACM0",
|
||||
motors={
|
||||
"wrist_pitch": [7, "sts3215"],
|
||||
},
|
||||
protocol_version=0,
|
||||
)
|
||||
arm_bus.connect()
|
||||
|
||||
# -------------------------------
|
||||
# Setup the serial connection for sensor (ACM1)
|
||||
# -------------------------------
|
||||
try:
|
||||
ser = serial.Serial("/dev/ttyACM1", 115200, timeout=1)
|
||||
except Exception as e:
|
||||
print(f"Error opening serial port /dev/ttyACM1: {e}")
|
||||
return
|
||||
|
||||
# Lists to store the motor positions and sensor values.
|
||||
positions = []
|
||||
sensor_values = []
|
||||
|
||||
# -------------------------------
|
||||
# Loop: move motor and collect sensor data
|
||||
# -------------------------------
|
||||
# Sweep the motor downward from 2800, decrementing by 10 each step.
for pos in range(2800, 1500, -10):  # 2800 down to 1510 (1500 is excluded)
|
||||
# Command the motor to go to position 'pos'
|
||||
arm_bus.write("Goal_Position", pos, ["wrist_pitch"])
|
||||
|
||||
# Wait a short period for the motor to move and the sensor to update.
|
||||
time.sleep(0.01)
|
||||
|
||||
# Read one line from the sensor device.
|
||||
sensor_val = np.nan # default if reading fails
|
||||
try:
|
||||
line = ser.readline().decode('utf-8').strip()
|
||||
if line:
|
||||
# Split the line into parts and convert each part to int.
|
||||
parts = line.split()
|
||||
# Ensure there are enough values (we expect at least 15 values)
|
||||
if len(parts) >= 15:
|
||||
values = [int(x) for x in parts]
|
||||
# Use the 15th value (index 14)
|
||||
sensor_val = values[14]
|
||||
except Exception as e:
|
||||
print(f"Error parsing sensor data: {e}")
|
||||
|
||||
positions.append(pos)
|
||||
sensor_values.append(sensor_val)
|
||||
print(f"Motor pos: {pos} | Sensor 15th value: {sensor_val}")
|
||||
|
||||
# Move the motor back to its starting position.
arm_bus.write("Goal_Position", 2800, ["wrist_pitch"])
|
||||
# -------------------------------
|
||||
# Plot the data: Motor Angle vs. Sensor 15th Value
|
||||
# -------------------------------
|
||||
plt.figure(figsize=(10, 6))
|
||||
plt.plot(positions, sensor_values, marker='o', linestyle='-')
|
||||
plt.xlabel("Motor Angle")
|
||||
plt.ylabel("Sensor 15th Value")
|
||||
plt.title("Motor Angle vs Sensor 15th Value")
|
||||
plt.grid(True)
|
||||
plt.savefig("asd.png", dpi=300)
|
||||
plt.close()
|
||||
print("Plot saved as asd.png")
|
||||
|
||||
# Close the serial connection.
|
||||
ser.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
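Once positions and sensor_values have been collected by the sweep above, a linear map from sensor reading back to motor angle can be fitted; a minimal sketch, assuming the lists from this script (NaN rows are dropped first):

import numpy as np

pos_arr = np.array(positions, dtype=float)
sens_arr = np.array(sensor_values, dtype=float)
mask = ~np.isnan(sens_arr)

# Fit angle ≈ a * sensor + b over the valid samples.
a, b = np.polyfit(sens_arr[mask], pos_arr[mask], deg=1)
print(f"angle ≈ {a:.3f} * sensor + {b:.1f}")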
@@ -1,682 +0,0 @@
|
||||
import threading
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
|
||||
# from qai_hub_models.models.mediapipe_hand.model import (
|
||||
# MediaPipeHand,
|
||||
# )
|
||||
# from qai_hub_models.utils.image_processing import (
|
||||
# app_to_net_image_inputs,
|
||||
# )
|
||||
from lerobot.common.robot_devices.motors.feetech import (
|
||||
CalibrationMode,
|
||||
FeetechMotorsBus,
|
||||
)
|
||||
|
||||
LOWER_BOUND_LINEAR = -100
|
||||
UPPER_BOUND_LINEAR = 200
|
||||
|
||||
import serial
|
||||
|
||||
|
||||
class HomonculusGlove:
|
||||
def __init__(self):
|
||||
self.serial_port = "COM10"
|
||||
self.baud_rate = 115200
|
||||
self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
|
||||
self.thread = threading.Thread(target=self.async_read)
|
||||
self.thread.start()
|
||||
self.last_d = {
|
||||
"thumb_0": 100,
|
||||
"thumb_1": 100,
|
||||
"thumb_2": 100,
|
||||
"thumb_3": 100,
|
||||
"index_0": 100,
|
||||
"index_1": 100,
|
||||
"index_2": 100,
|
||||
"middle_0": 100,
|
||||
"middle_1": 100,
|
||||
"middle_2": 100,
|
||||
"ring_0": 100,
|
||||
"ring_1": 100,
|
||||
"ring_2": 100,
|
||||
"pinky_0": 100,
|
||||
"pinky_1": 100,
|
||||
"pinky_2": 100,
|
||||
"battery_voltage": 100,
|
||||
}
|
||||
self.calibration = None
|
||||
|
||||
@property
|
||||
def joint_names(self):
|
||||
return list(self.last_d.keys())
|
||||
|
||||
def read(self, motor_names: list[str] | None = None):
|
||||
if motor_names is None:
|
||||
motor_names = self.joint_names
|
||||
|
||||
values = np.array([self.last_d[k] for k in motor_names])
|
||||
|
||||
print(motor_names)
|
||||
print(values)
|
||||
|
||||
if self.calibration is not None:
|
||||
values = self.apply_calibration(values, motor_names)
|
||||
print(values)
|
||||
return values
|
||||
|
||||
def async_read(self):
|
||||
while True:
|
||||
if self.serial.in_waiting > 0:
|
||||
self.serial.flush()
|
||||
vals = self.serial.readline().decode("utf-8").strip()
|
||||
vals = vals.split(" ")
|
||||
if len(vals) != 17:
|
||||
continue
|
||||
vals = [int(val) for val in vals]
|
||||
|
||||
d = {
|
||||
"thumb_0": vals[0],
|
||||
"thumb_1": vals[1],
|
||||
"thumb_2": vals[2],
|
||||
"thumb_3": vals[3],
|
||||
"index_0": vals[4],
|
||||
"index_1": vals[5],
|
||||
"index_2": vals[6],
|
||||
"middle_0": vals[7],
|
||||
"middle_1": vals[8],
|
||||
"middle_2": vals[9],
|
||||
"ring_0": vals[10],
|
||||
"ring_1": vals[11],
|
||||
"ring_2": vals[12],
|
||||
"pinky_0": vals[13],
|
||||
"pinky_1": vals[14],
|
||||
"pinky_2": vals[15],
|
||||
"battery_voltage": vals[16],
|
||||
}
|
||||
self.last_d = d
|
||||
# print(d.values())
|
||||
|
||||
def run_calibration(self):
|
||||
print("\nMove arm to open position")
|
||||
input("Press Enter to continue...")
|
||||
open_pos_list = []
|
||||
for _ in range(300):
|
||||
open_pos = self.read()
|
||||
open_pos_list.append(open_pos)
|
||||
time.sleep(0.01)
|
||||
open_pos = np.array(open_pos_list)
|
||||
max_open_pos = open_pos.max(axis=0)
|
||||
min_open_pos = open_pos.min(axis=0)
|
||||
|
||||
print(f"{max_open_pos=}")
|
||||
print(f"{min_open_pos=}")
|
||||
|
||||
print("\nMove arm to closed position")
|
||||
input("Press Enter to continue...")
|
||||
closed_pos_list = []
|
||||
for _ in range(300):
|
||||
closed_pos = self.read()
|
||||
closed_pos_list.append(closed_pos)
|
||||
time.sleep(0.01)
|
||||
closed_pos = np.array(closed_pos_list)
|
||||
max_closed_pos = closed_pos.max(axis=0)
|
||||
closed_pos[closed_pos < 1000] = 60000  # push low outlier readings high so they don't skew the min below
|
||||
min_closed_pos = closed_pos.min(axis=0)
|
||||
|
||||
print(f"{max_closed_pos=}")
|
||||
print(f"{min_closed_pos=}")
|
||||
|
||||
open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
|
||||
closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)
|
||||
|
||||
# INVERSION: these joints read "backwards" (the raw value decreases as the joint
# closes), so swap their open/closed calibration endpoints.
|
||||
for i, jname in enumerate(self.joint_names):
|
||||
if jname in ["thumb_0", "thumb_3", "index_2", "middle_2", "ring_2", "pinky_0", "pinky_2"]:
|
||||
tmp_pos = open_pos[i]
|
||||
open_pos[i] = closed_pos[i]
|
||||
closed_pos[i] = tmp_pos
|
||||
|
||||
print()
|
||||
print(f"{open_pos=}")
|
||||
print(f"{closed_pos=}")
|
||||
|
||||
homing_offset = [0] * len(self.joint_names)
|
||||
drive_mode = [0] * len(self.joint_names)
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": open_pos,
|
||||
"end_pos": closed_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.joint_names,
|
||||
}
|
||||
# return calib_dict
|
||||
self.set_calibration(calib_dict)
|
||||
|
||||
def set_calibration(self, calibration: dict[str, list]):
|
||||
self.calibration = calibration
|
||||
|
||||
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
|
||||
a "zero position" at 0 degree.
|
||||
|
||||
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
|
||||
rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
|
||||
|
||||
Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
|
||||
when given a goal position that is + or - their resolution. For instance, feetech xl330-m077 have a resolution of 4096, and
|
||||
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
|
||||
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
|
||||
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
|
||||
in the centered nominal degree range ]-180, 180[.
|
||||
"""
|
||||
if motor_names is None:
|
||||
motor_names = self.motor_names
|
||||
|
||||
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
|
||||
values = values.astype(np.float32)
|
||||
|
||||
for i, name in enumerate(motor_names):
|
||||
calib_idx = self.calibration["motor_names"].index(name)
|
||||
calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
start_pos = self.calibration["start_pos"][calib_idx]
|
||||
end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# Rescale the present position to a nominal range [0, 100] %,
|
||||
# useful for joints with linear motions like Aloha gripper
|
||||
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
|
||||
|
||||
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
|
||||
if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
|
||||
values[i] = end_pos
|
||||
else:
|
||||
msg = (
|
||||
f"Wrong motor position range detected for {name}. "
|
||||
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
|
||||
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
|
||||
f"but present value is {values[i]} %. "
|
||||
"This might be due to a cable connection issue creating an artificial jump in motor values. "
|
||||
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
|
||||
)
|
||||
print(msg)
|
||||
# raise JointOutOfRangeError(msg)
|
||||
|
||||
return values
|
||||
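As a quick worked example of the LINEAR branch above (made-up numbers): with start_pos = 500 and end_pos = 900, a raw reading of 700 rescales to (700 - 500) / (900 - 500) * 100 = 50, i.e. the joint is at 50 % of its calibrated travel. Readings slightly outside [0, 100] are tolerated up to the [LOWER_BOUND_LINEAR, UPPER_BOUND_LINEAR] bounds before the warning fires.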
|
||||
# def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
|
||||
# """Inverse of `apply_calibration`."""
|
||||
# if motor_names is None:
|
||||
# motor_names = self.motor_names
|
||||
|
||||
# for i, name in enumerate(motor_names):
|
||||
# calib_idx = self.calibration["motor_names"].index(name)
|
||||
# calib_mode = self.calibration["calib_mode"][calib_idx]
|
||||
|
||||
# if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
|
||||
# start_pos = self.calibration["start_pos"][calib_idx]
|
||||
# end_pos = self.calibration["end_pos"][calib_idx]
|
||||
|
||||
# # Convert from the nominal linear range of [0, 100] % to
|
||||
# # actual motor range of values which can be arbitrary.
|
||||
# values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
|
||||
|
||||
# values = np.round(values).astype(np.int32)
|
||||
# return values
|
||||
|
||||
|
||||
class HopeJuniorRobot:
|
||||
def __init__(self):
|
||||
self.arm_bus = FeetechMotorsBus(
|
||||
port="COM14",
|
||||
motors={
|
||||
# "motor1": (2, "sts3250"),
|
||||
# "motor2": (1, "scs0009"),
|
||||
"shoulder_pitch": [1, "sts3250"],
|
||||
"shoulder_yaw": [2, "sts3215"], # TODO: sts3250
|
||||
"shoulder_roll": [3, "sts3215"], # TODO: sts3250
|
||||
"elbow_flex": [4, "sts3250"],
|
||||
"wrist_roll": [5, "sts3215"],
|
||||
"wrist_yaw": [6, "sts3215"],
|
||||
"wrist_pitch": [7, "sts3215"],
|
||||
},
|
||||
protocol_version=0,
|
||||
)
|
||||
self.hand_bus = FeetechMotorsBus(
|
||||
port="COM15",
|
||||
motors={
|
||||
"thumb_basel_rotation": [30, "scs0009"],
|
||||
"thumb_flexor": [27, "scs0009"],
|
||||
"thumb_pinky_side": [26, "scs0009"],
|
||||
"thumb_thumb_side": [28, "scs0009"],
|
||||
"index_flexor": [25, "scs0009"],
|
||||
"index_pinky_side": [31, "scs0009"],
|
||||
"index_thumb_side": [32, "scs0009"],
|
||||
"middle_flexor": [24, "scs0009"],
|
||||
"middle_pinky_side": [33, "scs0009"],
|
||||
"middle_thumb_side": [34, "scs0009"],
|
||||
"ring_flexor": [21, "scs0009"],
|
||||
"ring_pinky_side": [36, "scs0009"],
|
||||
"ring_thumb_side": [35, "scs0009"],
|
||||
"pinky_flexor": [23, "scs0009"],
|
||||
"pinky_pinky_side": [38, "scs0009"],
|
||||
"pinky_thumb_side": [37, "scs0009"],
|
||||
},
|
||||
protocol_version=1,
|
||||
group_sync_read=False,
|
||||
)
|
||||
|
||||
def get_hand_calibration(self):
|
||||
homing_offset = [0] * len(self.hand_bus.motor_names)
|
||||
drive_mode = [0] * len(self.hand_bus.motor_names)
|
||||
|
||||
start_pos = [
|
||||
500,
|
||||
900,
|
||||
1000,
|
||||
0,
|
||||
100,
|
||||
250,
|
||||
750,
|
||||
100,
|
||||
400,
|
||||
150,
|
||||
100,
|
||||
120,
|
||||
980,
|
||||
100,
|
||||
950,
|
||||
750,
|
||||
]
|
||||
|
||||
end_pos = [
|
||||
500 - 250,
|
||||
900 - 300,
|
||||
1000 - 550,
|
||||
0 + 550,
|
||||
1000,
|
||||
250 + 700,
|
||||
750 - 700,
|
||||
1000,
|
||||
400 + 700,
|
||||
150 + 700,
|
||||
1000,
|
||||
120 + 700,
|
||||
980 - 700,
|
||||
1000,
|
||||
950 - 700,
|
||||
750 - 700,
|
||||
]
|
||||
|
||||
calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)
|
||||
|
||||
calib_dict = {
|
||||
"homing_offset": homing_offset,
|
||||
"drive_mode": drive_mode,
|
||||
"start_pos": start_pos,
|
||||
"end_pos": end_pos,
|
||||
"calib_mode": calib_modes,
|
||||
"motor_names": self.hand_bus.motor_names,
|
||||
}
|
||||
return calib_dict
|
||||
|
||||
def connect(self):
|
||||
self.arm_bus.connect()
|
||||
#self.hand_bus.connect()
|
||||
|
||||
|
||||
ESCAPE_KEY_ID = 27
|
||||
|
||||
|
||||
def capture_and_display_processed_frames(
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray],
|
||||
window_display_name: str,
|
||||
cap_device: int = 0,
|
||||
) -> None:
|
||||
"""
|
||||
Capture frames from the given input camera device, run them through
|
||||
the frame processor, and display the outputs in a window with the given name.
|
||||
|
||||
User should press Esc to exit.
|
||||
|
||||
Inputs:
|
||||
frame_processor: Callable[[np.ndarray], np.ndarray]
|
||||
Processes frames.
|
||||
Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
|
||||
window_display_name: str
|
||||
Name of the window used to display frames.
|
||||
cap_device: int
|
||||
Identifier for the camera to use to capture frames.
|
||||
"""
|
||||
cv2.namedWindow(window_display_name)
|
||||
capture = cv2.VideoCapture(cap_device)
|
||||
if not capture.isOpened():
|
||||
raise ValueError("Unable to open video capture.")
|
||||
|
||||
frame_count = 0
|
||||
has_frame, frame = capture.read()
|
||||
while has_frame:
|
||||
assert isinstance(frame, np.ndarray)
|
||||
|
||||
frame_count = frame_count + 1
|
||||
# mirror frame
|
||||
frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
|
||||
|
||||
# process & show frame
|
||||
processed_frame = frame_processor(frame)
|
||||
cv2.imshow(window_display_name, processed_frame[:, :, ::-1])
|
||||
|
||||
has_frame, frame = capture.read()
|
||||
key = cv2.waitKey(1)
|
||||
if key == ESCAPE_KEY_ID:
|
||||
break
|
||||
|
||||
capture.release()
|
||||
|
||||
|
||||
def main():
|
||||
robot = HopeJuniorRobot()
|
||||
robot.connect()
|
||||
|
||||
# robot.hand_bus.calibration = None
|
||||
|
||||
# breakpoint()
|
||||
# print(robot.arm_bus.read("Present_Position"))
|
||||
robot.arm_bus.write("Torque_Enable", 1)
|
||||
robot.arm_bus.write("Acceleration", 20)
|
||||
robot.arm_bus.read("Acceleration")
|
||||
robot.arm_bus.write("Goal_Position", calibration["start_pos"])
|
||||
exit()
|
||||
calibration = robot.get_hand_calibration()
|
||||
robot.arm_bus.write("Goal_Position", calibration["start_pos"])
|
||||
# robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
|
||||
robot.hand_bus.set_calibration(calibration)
|
||||
lol = 1
|
||||
|
||||
# # print(motors_bus.write("Goal_Position", 500))
|
||||
# print(robot.hand_bus.read("Present_Position"))
|
||||
# # pos = hand_bus.read("Present_Position")
|
||||
# # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
|
||||
# # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
|
||||
# robot.hand_bus.read("Acceleration")
|
||||
# robot.hand_bus.write("Acceleration", 10)
|
||||
|
||||
# sleep = 1
|
||||
# # robot.hand_bus.write(
|
||||
# # "Goal_Position", [glove.last_d['index_2']-1500,300,300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
|
||||
# # )
|
||||
# #time.sleep(sleep)
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
# robot.hand_bus.write(
|
||||
# "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
# )
|
||||
# time.sleep(sleep)
|
||||
|
||||
# breakpoint()
|
||||
|
||||
glove = HomonculusGlove()
|
||||
glove.run_calibration()
|
||||
# while True:
|
||||
# joint_names = ["index_1", "index_2"]
|
||||
# joint_values = glove.read(joint_names)
|
||||
# print(joint_values)
|
||||
|
||||
input()
|
||||
while True:
|
||||
joint_names = []
|
||||
joint_names += ["thumb_0", "thumb_2", "thumb_3"]
|
||||
joint_names += ["index_1", "index_2"]
|
||||
joint_names += ["middle_1", "middle_2"]
|
||||
joint_names += ["ring_1", "ring_2"]
|
||||
joint_names += ["pinky_1", "pinky_2"]
|
||||
joint_values = glove.read(joint_names)
|
||||
joint_values = joint_values.round().astype(int)
|
||||
joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}
|
||||
|
||||
motor_values = []
|
||||
motor_names = []
|
||||
motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
|
||||
motor_values += [
|
||||
joint_dict["thumb_3"],
|
||||
joint_dict["thumb_0"],
|
||||
joint_dict["thumb_2"],
|
||||
joint_dict["thumb_2"],
|
||||
]
|
||||
motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
motor_values += [joint_dict["index_2"], joint_dict["index_1"], joint_dict["index_1"]]
|
||||
motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
|
||||
motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
|
||||
motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
|
||||
motor_values += [joint_dict["pinky_2"], joint_dict["pinky_1"], joint_dict["pinky_1"]]
|
||||
|
||||
motor_values = np.array(motor_values)
|
||||
motor_values = np.clip(motor_values, 0, 100)
|
||||
|
||||
robot.hand_bus.write("Goal_Position", motor_values, motor_names)
|
||||
time.sleep(0.02)
|
||||
|
||||
while True:
|
||||
# print(glove.read()['index_2']-1500)
|
||||
glove_index_flexor = glove.read()["index_2"] - 1500
|
||||
glove_index_subflexor = glove.read()["index_1"] - 1500
|
||||
glove_index_side = glove.read()["index_0"] - 2100
|
||||
|
||||
vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]
|
||||
|
||||
keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
|
||||
glove_middle_flexor = glove.read()["middle_2"] - 1500
|
||||
glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
|
||||
vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
|
||||
keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
|
||||
glove_ring_flexor = glove.read()["ring_2"] - 1300
|
||||
print(glove_ring_flexor)
|
||||
glove_ring_subflexor = glove.read()["ring_1"] - 1100
|
||||
|
||||
vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
|
||||
keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
|
||||
glove_pinky_flexor = glove.read()["pinky_2"] - 1500
|
||||
glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
|
||||
vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
|
||||
keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
|
||||
robot.hand_bus.write("Goal_Position", vals, keys)
|
||||
time.sleep(0.1)
|
||||
|
||||
time.sleep(3)
|
||||
|
||||
def move_arm(loop=10):
|
||||
sleep = 1
|
||||
for i in range(loop):
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
|
||||
time.sleep(sleep + 2)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
|
||||
time.sleep(sleep)
|
||||
|
||||
def move_hand(loop=10):
|
||||
sleep = 0.5
|
||||
for i in range(loop):
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[500, 1000, 0, 1000],
|
||||
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
|
||||
)
|
||||
time.sleep(sleep)
|
||||
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[500, 1000 - 250, 0 + 300, 1000 - 200],
|
||||
["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[100 + 450, 100 + 400, 100 + 400],
|
||||
["index_flexor", "index_pinky_side", "index_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[100 + 350, 1000 - 450, 150 + 450],
|
||||
["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[200 + 650, 200 + 350, 0 + 350],
|
||||
["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position",
|
||||
[200 + 450, 100 + 400, 700 - 400],
|
||||
["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
|
||||
)
|
||||
time.sleep(sleep)
|
||||
|
||||
move_hand(3)
|
||||
|
||||
move_arm(1)
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
executor.submit(move_arm)
|
||||
executor.submit(move_hand)
|
||||
|
||||
# initial position
|
||||
for i in range(3):
|
||||
robot.hand_bus.write(
|
||||
"Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
# for i in range(3):
|
||||
# robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
|
||||
# 100+300, 950-250, 100+250,
|
||||
# 100+200, 1000-300, 150+300,
|
||||
# 200+500, 200+200, 0+200,
|
||||
# 200+300, 100+200, 700-200])
|
||||
# time.sleep(1)
|
||||
|
||||
# camera = 0
|
||||
# score_threshold = 0.95
|
||||
# iou_threshold = 0.3
|
||||
|
||||
# app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)
|
||||
|
||||
# def frame_processor(frame: np.ndarray) -> np.ndarray:
|
||||
# # Input Prep
|
||||
# NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)
|
||||
|
||||
# # Run Bounding Box & Keypoint Detector
|
||||
# batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)
|
||||
|
||||
# # The region of interest ( bounding box of 4 (x, y) corners).
|
||||
# # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
|
||||
# # where 2 == (x, y)
|
||||
# #
|
||||
# # A list element will be None if there is no selected ROI.
|
||||
# batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)
|
||||
|
||||
# # selected landmarks for the ROI (if any)
|
||||
# # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
|
||||
# # where K == number of landmark keypoints, 3 == (x, y, confidence)
|
||||
# #
|
||||
# # A list element will be None if there is no ROI.
|
||||
# landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)
|
||||
|
||||
# app._draw_predictions(
|
||||
# NHWC_int_numpy_frames,
|
||||
# batched_selected_boxes,
|
||||
# batched_selected_keypoints,
|
||||
# batched_roi_4corners,
|
||||
# *landmarks_out,
|
||||
# )
|
||||
|
||||
# return NHWC_int_numpy_frames[0]
|
||||
|
||||
# capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,231 +0,0 @@
|
||||
|
||||
#robot.arm_bus.write("Acceleration", [20], ["shoulder_pitch"])
|
||||
|
||||
####DEBUGGER####################
|
||||
# joint = input("Enter joint name: ")
|
||||
# encoder = EncoderReader("/dev/ttyUSB1", 115200)
|
||||
# start_angle = arm_calibration['start_pos'][arm_calibration['motor_names'].index(joint)]
|
||||
# end_angle = arm_calibration['end_pos'][arm_calibration['motor_names'].index(joint)]
|
||||
# # start_angle = shoulder_calibration['start_pos'][shoulder_calibration['motor_names'].index(joint)]
|
||||
# # end_angle = shoulder_calibration['end_pos'][shoulder_calibration['motor_names'].index(joint)]
|
||||
# while True:
|
||||
# angle = int(start_angle+(end_angle-start_angle)*encoder.read()/1000)
|
||||
# # robot.shoulder_bus.set_bus_baudrate(115200)
|
||||
# # robot.shoulder_bus.write("Goal_Position",angle, [joint])
|
||||
# robot.shoulder_bus.set_bus_baudrate(1000000)
|
||||
# robot.arm_bus.write("Goal_Position",angle, [joint])
|
||||
# print(angle)
|
||||
# time.sleep(0.1)
|
||||
|
||||
|
||||
|
||||
##### SAFETY CHECKS EXPLAINED #####
# There are two built-in safety checks: one based on load and one based on current.
# Current: if Present_Current exceeds Protection_Current, we wait Over_Current_Protection_Time (in ms) and then set Torque_Enable to 0.
# Load: if Present_Load exceeds Max_Torque_Limit * Overload_Torque (expressed as a percentage), we wait Protection_Time (in ms)
# and then set Max_Torque_Limit to Protective_Torque.
# Though we can specify Min/Max_Angle_Limit, Max_Temperature_Limit and Min/Max_Voltage_Limit, no safety checks are implemented for these values.
|
||||
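# In pseudocode, the two protections behave roughly like this (assumed semantics,
# inferred from the register names; not firmware source):
#
#   if Present_Current > Protection_Current:        # sustained for Over_Current_Protection_Time ms
#       Torque_Enable = 0
#
#   if Present_Load > Max_Torque_Limit * Overload_Torque / 100:  # sustained for Protection_Time ms
#       Max_Torque_Limit = Protective_Torque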
|
||||
#robot.arm_bus.set_calibration(arm_calibration)
|
||||
|
||||
|
||||
|
||||
|
||||
#method 1
|
||||
# robot.arm_bus.write("Overload_Torque", 80)
|
||||
# robot.arm_bus.write("Protection_Time", 10)
|
||||
# robot.arm_bus.write("Protective_Torque", 1)
|
||||
# robot.arm_bus.write("Protection_Current", 200,["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Over_Current_Protection_Time", 10)
|
||||
|
||||
#method 2
|
||||
# robot.arm_bus.write("Protection_Current", 500,["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Over_Current_Protection_Time", 10)
|
||||
# robot.arm_bus.write("Max_Torque_Limit", 1000)
|
||||
# robot.arm_bus.write("Overload_Torque", 40)
|
||||
# robot.arm_bus.write("Protection_Time", 10)
|
||||
# robot.arm_bus.write("Protective_Torque", 1)
|
||||
|
||||
# robot.shoulder_bus.set_bus_baudrate(115200)
|
||||
# robot.shoulder_bus.write("Goal_Position",2500)
|
||||
# exit()
|
||||
|
||||
######LOGGER####################
|
||||
# from test_torque.log_and_plot_feetech import log_and_plot_params
|
||||
|
||||
# params_to_log = [
|
||||
# "Protection_Current",
|
||||
# "Present_Current",
|
||||
# "Max_Torque_Limit",
|
||||
# "Protection_Time",
|
||||
# "Overload_Torque",
|
||||
# "Present_Load",
|
||||
# "Present_Position",
|
||||
# ]
|
||||
|
||||
# servo_names = ["shoulder_pitch"]
|
||||
|
||||
|
||||
# servo_data, timestamps = log_and_plot_params(robot.shoulder_bus, params_to_log, servo_names, test_id="shoulder_pitch")
|
||||
# exit()
|
||||
|
||||
|
||||
#robot.arm_bus.write("Goal_Position",2300, ["shoulder_pitch"])
|
||||
# dt = 2
|
||||
# steps = 4
|
||||
# max_pos = 1500
|
||||
# min_pos = 2300
|
||||
# increment = (max_pos - min_pos) / steps
|
||||
# # Move from min_pos to max_pos in steps
|
||||
# for i in range(steps + 1): # Include the last step
|
||||
# current_pos = min_pos + int(i * increment)
|
||||
# robot.arm_bus.write("Goal_Position", [current_pos], ["shoulder_pitch"])
|
||||
# time.sleep(dt)
|
||||
|
||||
# # Move back from max_pos to min_pos in steps
|
||||
# for i in range(steps + 1): # Include the last step
|
||||
# current_pos = max_pos - int(i * increment)
|
||||
# robot.arm_bus.write("Goal_Position", [current_pos], ["shoulder_pitch"])
|
||||
# time.sleep(dt)
|
||||
# demo to show how sending a lot of values makes the robot shake
|
||||
|
||||
|
||||
|
||||
# # Step increment
|
||||
#
|
||||
|
||||
# # Move from min_pos to max_pos in steps
|
||||
# for i in range(steps + 1): # Include the last step
|
||||
# current_pos = min_pos + int(i * increment)
|
||||
# robot.arm_bus.write("Goal_Position", [current_pos], ["elbow_flex"])
|
||||
# time.sleep(dt)
|
||||
|
||||
# # Move back from max_pos to min_pos in steps
|
||||
# for i in range(steps + 1): # Include the last step
|
||||
# current_pos = max_pos - int(i * increment)
|
||||
# robot.arm_bus.write("Goal_Position", [current_pos], ["elbow_flex"])
|
||||
# time.sleep(dt)
|
||||
# exit()
|
||||
|
||||
#robot.arm_bus.write("Goal_Position", a # shoulder_calibration = robot.get_shoulder_calibration()
|
||||
# print(shoulder_calibration)m_calibration["start_pos"])
|
||||
# robot.arm_bus.write("Over_Current_Protection_Time", 50)
|
||||
# robot.arm_bus.write("Protection_Current", 310, ["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Overload_Torque", 80, ["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Protection_Time", 100, ["shoulder_pitch"])
|
||||
# robot.arm_bus.write("Over_Current_Protection_Time", 50, ["shoulder_pitch"])
|
||||
|
||||
# robot.arm_bus.write("Protective_Torque", 20, ["shoulder_pitch"])
|
||||
|
||||
|
||||
# robot.arm_bus.write("Goal_Position", [600],["shoulder_pitch"])
|
||||
|
||||
# from test_torque.log_and_plot_feetech import log_and_plot_params
|
||||
|
||||
# params_to_log = [
|
||||
# "Present_Current",
|
||||
# "Protection_Current",
|
||||
# "Overload_Torque",
|
||||
# "Protection_Time",
|
||||
# "Protective_Torque",
|
||||
# "Present_Load",
|
||||
# "Present_Position",
|
||||
# ]
|
||||
|
||||
# servo_names = ["shoulder_pitch"]
|
||||
|
||||
#
|
||||
|
||||
#robot.arm_bus.write("Goal_Position", arm_calibration["start_pos"])
|
||||
|
||||
#robot.hand_bus.set_calibration(hand_calibration)
|
||||
|
||||
#interp = 0.3
|
||||
|
||||
#robot.arm_bus.write("Goal_Position", [int((i*interp+j*(1-interp))) for i, j in zip(arm_calibration["start_pos"], arm_calibration["end_pos"])])
|
||||
#exit()
|
||||
|
||||
# glove = HomonculusGlove()
|
||||
# glove.run_calibration()
|
||||
|
||||
|
||||
|
||||
####GOOD FOR GRASPING
|
||||
# start_pos = [
|
||||
# 500,
|
||||
# 900,
|
||||
# 500,
|
||||
# 1000,
|
||||
# 100,
|
||||
# 450,#250
|
||||
# 950,#750
|
||||
# 100,
|
||||
# 300,#400
|
||||
# 50,#150
|
||||
# 100,
|
||||
# 120,
|
||||
# 980,
|
||||
# 100,
|
||||
# 950,
|
||||
# 750,
|
||||
# ]
|
||||
# end_pos = [
|
||||
# start_pos[0] - 400,
|
||||
# start_pos[1] - 300,
|
||||
# start_pos[2] + 500,
|
||||
# start_pos[3] - 50,
|
||||
# start_pos[4] + 900,
|
||||
# start_pos[5] + 500,
|
||||
# start_pos[6] - 500,
|
||||
# start_pos[7] + 900,
|
||||
# start_pos[8] + 700,
|
||||
# start_pos[9] + 700,
|
||||
# start_pos[10] + 900,
|
||||
# start_pos[11] + 700,
|
||||
# start_pos[12] - 700,
|
||||
# start_pos[13] + 900,
|
||||
# start_pos[14] - 700,
|
||||
# start_pos[15] - 700,
|
||||
# ]
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
SCS_SERIES_CONTROL_TABLE = {
|
||||
|
||||
# "Max_Torque_Limit": (16, 2),
|
||||
# "Phase": (18, 1),
|
||||
# "Unloading_Condition": (19, 1),
|
||||
|
||||
"Protective_Torque": (37, 1),
|
||||
"Protection_Time": (38, 1),
|
||||
# "Baud_Rate": (48, 1),
|
||||
|
||||
}
|
||||
|
||||
def read_and_print_scs_values(robot):
|
||||
for param_name in SCS_SERIES_CONTROL_TABLE:
|
||||
value = robot.hand_bus.read(param_name)
|
||||
print(f"{param_name}: {value}")
|
||||
|
||||
motor_1_values = {
|
||||
"Lock" : 255,
|
||||
#"Protection_Time": 20#so if you write to these they turn to 0 for some fucking reason. protection time was 100, procetive to
|
||||
}
|
||||
|
||||
# motor_1_values = {
|
||||
# "Lock": 1,
|
||||
# "Protection_Time": 100,
|
||||
# "Protective_Torque": 20,
|
||||
# "Phase": 1,#thisu is bullshit
|
||||
# "Unloading_Condition": 32,
|
||||
|
||||
# }
|
||||
#bug in writing to specific values of the scs0009
|
||||
|
||||
# Write values to motor 2, there is overload torque there
|
||||
# OK, writing works; the jittering happens because the overload-torque protection is still being triggered.
|
||||
|
||||
# TODO: write a functioning version for the scs0009 (low priority).
|
||||
@@ -1,18 +0,0 @@
|
||||
# Color gradient function (values 0-2024 are scaled to 0-10)
def value_to_color(value):
    # Clamp the value between 0 and 2024
    value = max(0, min(2024, value))

    # Scale from [0..2024] to [0..10]
    scaled_value = (value / 2024) * 10

    # Green to yellow for scaled_value in [0, 5], then yellow to red in (5, 10]
    if scaled_value <= 5:
        r = int(255 * (scaled_value / 5))
        g = 255
    else:
        r = 255
        g = int(255 * (1 - (scaled_value - 5) / 5))
    b = 0

    return (r, g, b)
|
||||
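For instance, values chosen to hit the ends and the middle of the gradient:

print(value_to_color(0))     # (0, 255, 0)   pure green
print(value_to_color(1012))  # (255, 255, 0) yellow at the midpoint
print(value_to_color(2024))  # (255, 0, 0)   pure red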
94
examples/robots/lekiwi_client_app.py
Executable file
@@ -0,0 +1,94 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import time
|
||||
|
||||
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
|
||||
from lerobot.common.datasets.utils import hw_to_dataset_features
|
||||
from lerobot.common.robots.lekiwi.config_lekiwi import LeKiwiClientConfig
|
||||
from lerobot.common.robots.lekiwi.lekiwi_client import LeKiwiClient
|
||||
from lerobot.common.teleoperators.keyboard import KeyboardTeleop, KeyboardTeleopConfig
|
||||
from lerobot.common.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig
|
||||
|
||||
NB_CYCLES_CLIENT_CONNECTION = 250
|
||||
|
||||
|
||||
def main():
|
||||
logging.info("Configuring Teleop Devices")
|
||||
leader_arm_config = SO100LeaderConfig(port="/dev/tty.usbmodem58760433331")
|
||||
leader_arm = SO100Leader(leader_arm_config)
|
||||
|
||||
keyboard_config = KeyboardTeleopConfig()
|
||||
keyboard = KeyboardTeleop(keyboard_config)
|
||||
|
||||
logging.info("Configuring LeKiwi Client")
|
||||
robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
|
||||
robot = LeKiwiClient(robot_config)
|
||||
|
||||
logging.info("Creating LeRobot Dataset")
|
||||
|
||||
action_features = hw_to_dataset_features(robot.action_features, "action")
|
||||
obs_features = hw_to_dataset_features(robot.observation_features, "observation")
|
||||
dataset_features = {**action_features, **obs_features}
|
||||
|
||||
dataset = LeRobotDataset.create(
|
||||
repo_id="user/lekiwi" + str(int(time.time())),
|
||||
fps=10,
|
||||
features=dataset_features,
|
||||
robot_type=robot.name,
|
||||
)
|
||||
|
||||
logging.info("Connecting Teleop Devices")
|
||||
leader_arm.connect()
|
||||
keyboard.connect()
|
||||
|
||||
logging.info("Connecting remote LeKiwi")
|
||||
robot.connect()
|
||||
|
||||
if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected:
|
||||
logging.error("Failed to connect to all devices")
|
||||
return
|
||||
|
||||
logging.info("Starting LeKiwi teleoperation")
|
||||
i = 0
|
||||
while i < NB_CYCLES_CLIENT_CONNECTION:
|
||||
arm_action = leader_arm.get_action()
|
||||
base_action = keyboard.get_action()
|
||||
action = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
|
||||
|
||||
action_sent = robot.send_action(action)
|
||||
observation = robot.get_observation()
|
||||
|
||||
frame = {**action_sent, **observation}
|
||||
task = "Dummy Example Task Dataset"
|
||||
|
||||
logging.info("Saved a frame into the dataset")
|
||||
dataset.add_frame(frame, task)
|
||||
i += 1
|
||||
|
||||
logging.info("Disconnecting Teleop Devices and LeKiwi Client")
|
||||
robot.disconnect()
|
||||
leader_arm.disconnect()
|
||||
keyboard.disconnect()
|
||||
|
||||
logging.info("Uploading dataset to the hub")
|
||||
dataset.save_episode()
|
||||
dataset.push_to_hub()
|
||||
|
||||
logging.info("Finished LeKiwi cleanly")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
681
examples/test.py
@@ -1,681 +0,0 @@
import threading
import time
from typing import Callable

import cv2
import numpy as np
import serial

# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
# from qai_hub_models.models.mediapipe_hand.model import (
#     MediaPipeHand,
# )
# from qai_hub_models.utils.image_processing import (
#     app_to_net_image_inputs,
# )
from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)

LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200


class HomonculusGlove:
    def __init__(self):
        self.serial_port = "/dev/tty.usbmodem21401"
        self.baud_rate = 115200
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
        self.last_d = {
            "thumb_0": 100,
            "thumb_1": 100,
            "thumb_2": 100,
            "thumb_3": 100,
            "index_0": 100,
            "index_1": 100,
            "index_2": 100,
            "middle_0": 100,
            "middle_1": 100,
            "middle_2": 100,
            "ring_0": 100,
            "ring_1": 100,
            "ring_2": 100,
            "pinky_0": 100,
            "pinky_1": 100,
            "pinky_2": 100,
            "battery_voltage": 100,
        }
        self.calibration = None
        # State is initialized before the reader thread starts, to avoid a race;
        # daemon=True lets the process exit even though the read loop never returns.
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

    @property
    def joint_names(self):
        return list(self.last_d.keys())

    def read(self, motor_names: list[str] | None = None):
        if motor_names is None:
            motor_names = self.joint_names

        values = np.array([self.last_d[k] for k in motor_names])

        print(motor_names)
        print(values)

        if self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            print(values)
        return values

    def async_read(self):
        while True:
            if self.serial.in_waiting > 0:
                self.serial.flush()
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")
                if len(vals) != 17:
                    continue
                vals = [int(val) for val in vals]

                d = {
                    "thumb_0": vals[0],
                    "thumb_1": vals[1],
                    "thumb_2": vals[2],
                    "thumb_3": vals[3],
                    "index_0": vals[4],
                    "index_1": vals[5],
                    "index_2": vals[6],
                    "middle_0": vals[7],
                    "middle_1": vals[8],
                    "middle_2": vals[9],
                    "ring_0": vals[10],
                    "ring_1": vals[11],
                    "ring_2": vals[12],
                    "pinky_0": vals[13],
                    "pinky_1": vals[14],
                    "pinky_2": vals[15],
                    "battery_voltage": vals[16],
                }
                self.last_d = d
                # print(d.values())

    def run_calibration(self):
        print("\nMove arm to open position")
        input("Press Enter to continue...")
        open_pos_list = []
        for _ in range(300):
            open_pos = self.read()
            open_pos_list.append(open_pos)
            time.sleep(0.01)
        open_pos = np.array(open_pos_list)
        max_open_pos = open_pos.max(axis=0)
        min_open_pos = open_pos.min(axis=0)

        print(f"{max_open_pos=}")
        print(f"{min_open_pos=}")

        print("\nMove arm to closed position")
        input("Press Enter to continue...")
        closed_pos_list = []
        for _ in range(300):
            closed_pos = self.read()
            closed_pos_list.append(closed_pos)
            time.sleep(0.01)
        closed_pos = np.array(closed_pos_list)
        max_closed_pos = closed_pos.max(axis=0)
        closed_pos[closed_pos < 1000] = 60000
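        # Presumably a guard against wrap-around/outlier samples: readings below
        # 1000 are pushed to a large sentinel so they do not win the min() below.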
        min_closed_pos = closed_pos.min(axis=0)

        print(f"{max_closed_pos=}")
        print(f"{min_closed_pos=}")

        open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
        closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)

        # Inversion: swap the open/closed extremes for joints whose raw signal
        # runs in the opposite direction.
        for i, jname in enumerate(self.joint_names):
            if jname in ["thumb_0", "thumb_3", "index_2", "middle_2", "ring_2", "pinky_0", "pinky_2"]:
                open_pos[i], closed_pos[i] = closed_pos[i], open_pos[i]

        print()
        print(f"{open_pos=}")
        print(f"{closed_pos=}")

        homing_offset = [0] * len(self.joint_names)
        drive_mode = [0] * len(self.joint_names)
        calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": open_pos,
            "end_pos": closed_pos,
            "calib_mode": calib_modes,
            "motor_names": self.joint_names,
        }
        # return calib_dict
        self.set_calibration(calib_dict)

    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Convert from the unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree
        range ]-180.0, 180.0[ with a "zero position" at 0 degrees.

        Note: we say "nominal degree range" because the motors can take values outside this range, e.g. 190 degrees
        if the motor rotates more than half a turn from the zero position. However, most motors can't rotate more
        than 180 degrees and stay within this range.

        Joint values are originally in [0, 2**32[ (unsigned int32). Each motor is expected to complete a full
        rotation when given a goal position offset by + or - its resolution. For instance, the feetech xl330-m077
        has a resolution of 4096: from any position in its original range, say 56734, it completes a full clockwise
        rotation by moving to 60830, or an anticlockwise one by moving to 52638. The position in the original range
        is arbitrary and can vary a lot from motor to motor. To harmonize between motors of the same model,
        different robots, or even models of different brands, we work in the centered nominal degree range
        ]-180, 180[.
        """
        if motor_names is None:
            motor_names = self.joint_names  # this class defines joint_names, not motor_names

        # Convert from the unsigned int32 original range [0, 2**32[ to signed float32
        values = np.asarray(values).astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to a nominal range [0, 100] %,
                # useful for joints with linear motions like the Aloha gripper
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
                        values[i] = end_pos
                    else:
                        msg = (
                            f"Wrong motor position range detected for {name}. "
                            f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
                            f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
                            f"but present value is {values[i]} %. "
                            "This might be due to a cable connection issue creating an artificial jump in motor values. "
                            "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                        )
                        print(msg)
                        # raise JointOutOfRangeError(msg)

        return values
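    # Worked example of the LINEAR rescale above (hypothetical calibration values):
    # with start_pos=500 and end_pos=250, a raw reading of 375 maps to
    # (375 - 500) / (250 - 500) * 100 = 50.0 %, i.e. halfway between the
    # calibrated open and closed extremes.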

    # def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
    #     """Inverse of `apply_calibration`."""
    #     if motor_names is None:
    #         motor_names = self.motor_names

    #     for i, name in enumerate(motor_names):
    #         calib_idx = self.calibration["motor_names"].index(name)
    #         calib_mode = self.calibration["calib_mode"][calib_idx]

    #         if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
    #             start_pos = self.calibration["start_pos"][calib_idx]
    #             end_pos = self.calibration["end_pos"][calib_idx]

    #             # Convert from the nominal linear range of [0, 100] % to
    #             # the actual motor range of values, which can be arbitrary.
    #             values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos

    #     values = np.round(values).astype(np.int32)
    #     return values


class HopeJuniorRobot:
    def __init__(self):
        self.arm_bus = FeetechMotorsBus(
            port="/dev/tty.usbmodem58760429571",
            motors={
                # "motor1": (2, "sts3250"),
                # "motor2": (1, "scs0009"),
                "shoulder_pitch": [1, "sts3250"],
                "shoulder_yaw": [2, "sts3215"],  # TODO: sts3250
                "shoulder_roll": [3, "sts3215"],  # TODO: sts3250
                "elbow_flex": [4, "sts3250"],
                "wrist_roll": [5, "sts3215"],
                "wrist_yaw": [6, "sts3215"],
                "wrist_pitch": [7, "sts3215"],
            },
            protocol_version=0,
        )
        self.hand_bus = FeetechMotorsBus(
            port="/dev/tty.usbmodem585A0077581",
            motors={
                "thumb_basel_rotation": [30, "scs0009"],
                "thumb_flexor": [27, "scs0009"],
                "thumb_pinky_side": [26, "scs0009"],
                "thumb_thumb_side": [28, "scs0009"],
                "index_flexor": [25, "scs0009"],
                "index_pinky_side": [31, "scs0009"],
                "index_thumb_side": [32, "scs0009"],
                "middle_flexor": [24, "scs0009"],
                "middle_pinky_side": [33, "scs0009"],
                "middle_thumb_side": [34, "scs0009"],
                "ring_flexor": [21, "scs0009"],
                "ring_pinky_side": [36, "scs0009"],
                "ring_thumb_side": [35, "scs0009"],
                "pinky_flexor": [23, "scs0009"],
                "pinky_pinky_side": [38, "scs0009"],
                "pinky_thumb_side": [37, "scs0009"],
            },
            protocol_version=1,
            group_sync_read=False,
        )

    def get_hand_calibration(self):
        homing_offset = [0] * len(self.hand_bus.motor_names)
        drive_mode = [0] * len(self.hand_bus.motor_names)

        start_pos = [
            500,
            900,
            1000,
            0,
            100,
            250,
            750,
            100,
            400,
            150,
            100,
            120,
            980,
            100,
            950,
            750,
        ]
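        # In the list below, each end position is written as the start position
        # plus/minus the travel for that motor (presumably measured by hand), so
        # the per-joint range is easy to read off.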

        end_pos = [
            500 - 250,
            900 - 300,
            1000 - 550,
            0 + 550,
            1000,
            250 + 700,
            750 - 700,
            1000,
            400 + 700,
            150 + 700,
            1000,
            120 + 700,
            980 - 700,
            1000,
            950 - 700,
            750 - 700,
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.hand_bus.motor_names,
        }
        return calib_dict

    def connect(self):
        self.arm_bus.connect()
        self.hand_bus.connect()


ESCAPE_KEY_ID = 27


def capture_and_display_processed_frames(
    frame_processor: Callable[[np.ndarray], np.ndarray],
    window_display_name: str,
    cap_device: int = 0,
) -> None:
    """
    Capture frames from the given input camera device, run them through
    the frame processor, and display the outputs in a window with the given name.

    The user should press Esc to exit.

    Inputs:
        frame_processor: Callable[[np.ndarray], np.ndarray]
            Processes frames.
            Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
        window_display_name: str
            Name of the window used to display frames.
        cap_device: int
            Identifier for the camera to use to capture frames.
    """
    cv2.namedWindow(window_display_name)
    capture = cv2.VideoCapture(cap_device)
    if not capture.isOpened():
        raise ValueError("Unable to open video capture.")

    frame_count = 0
    has_frame, frame = capture.read()
    while has_frame:
        assert isinstance(frame, np.ndarray)

        frame_count += 1
        # mirror frame
        frame = np.ascontiguousarray(frame[:, ::-1, ::-1])
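        # This flips the frame left-right (the mirror) and also reverses the
        # channel order; the imshow call below reverses the channels back.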

        # process & show frame
        processed_frame = frame_processor(frame)
        cv2.imshow(window_display_name, processed_frame[:, :, ::-1])

        has_frame, frame = capture.read()
        key = cv2.waitKey(1)
        if key == ESCAPE_KEY_ID:
            break

    capture.release()


def main():
    robot = HopeJuniorRobot()
    robot.connect()

    # robot.hand_bus.calibration = None

    # breakpoint()
    # print(robot.arm_bus.read("Present_Position"))
    robot.arm_bus.write("Torque_Enable", 1)
    robot.arm_bus.write("Acceleration", 20)
    robot.arm_bus.read("Acceleration")

    calibration = robot.get_hand_calibration()
    robot.hand_bus.write("Goal_Position", calibration["start_pos"])
    # robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
    robot.hand_bus.set_calibration(calibration)
    lol = 1

    # # print(motors_bus.write("Goal_Position", 500))
    # print(robot.hand_bus.read("Present_Position"))
    # # pos = hand_bus.read("Present_Position")
    # # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
    # # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
    # robot.hand_bus.read("Acceleration")
    # robot.hand_bus.write("Acceleration", 10)

    # sleep = 1
    # # robot.hand_bus.write(
    # #     "Goal_Position", [glove.last_d['index_2']-1500,300,300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
    # # )
    # #time.sleep(sleep)
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
    # )
    # time.sleep(sleep)

    # breakpoint()

    glove = HomonculusGlove()
    glove.run_calibration()
    # while True:
    #     joint_names = ["index_1", "index_2"]
    #     joint_values = glove.read(joint_names)
    #     print(joint_values)

    input()
    while True:
        joint_names = []
        joint_names += ["thumb_0", "thumb_2", "thumb_3"]
        joint_names += ["index_1", "index_2"]
        joint_names += ["middle_1", "middle_2"]
        joint_names += ["ring_1", "ring_2"]
        joint_names += ["pinky_1", "pinky_2"]
        joint_values = glove.read(joint_names)
        joint_values = joint_values.round().astype(int)
        joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}

        motor_values = []
        motor_names = []
        motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
        motor_values += [
            joint_dict["thumb_3"],
            joint_dict["thumb_0"],
            joint_dict["thumb_2"],
            joint_dict["thumb_2"],
        ]
        motor_names += ["index_flexor", "index_pinky_side", "index_thumb_side"]
        motor_values += [joint_dict["index_2"], joint_dict["index_1"], joint_dict["index_1"]]
        motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
        motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
        motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
        motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
        motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]

        motor_values += [joint_dict["pinky_2"], joint_dict["pinky_1"], joint_dict["pinky_1"]]

        motor_values = np.array(motor_values)
        motor_values = np.clip(motor_values, 0, 100)

        robot.hand_bus.write("Goal_Position", motor_values, motor_names)
        time.sleep(0.02)
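    # NOTE: the loop above never breaks, so everything below in this function is
    # unreachable as written; it reads like alternative modes toggled during development.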

    while True:
        # print(glove.read()['index_2']-1500)
        glove_index_flexor = glove.read()["index_2"] - 1500
        glove_index_subflexor = glove.read()["index_1"] - 1500
        glove_index_side = glove.read()["index_0"] - 2100

        vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]

        keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]

        glove_middle_flexor = glove.read()["middle_2"] - 1500
        glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
        vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
        keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]

        glove_ring_flexor = glove.read()["ring_2"] - 1300
        print(glove_ring_flexor)
        glove_ring_subflexor = glove.read()["ring_1"] - 1100

        vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
        keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]

        glove_pinky_flexor = glove.read()["pinky_2"] - 1500
        glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
        vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
        keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]

        robot.hand_bus.write("Goal_Position", vals, keys)
        time.sleep(0.1)

    time.sleep(3)

    def move_arm(loop=10):
        sleep = 1
        for _ in range(loop):
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
            time.sleep(sleep + 2)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)

    def move_hand(loop=10):
        sleep = 0.5
        for _ in range(loop):
            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000, 0, 1000],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
            )
            time.sleep(sleep)

            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000 - 250, 0 + 300, 1000 - 200],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 450, 100 + 400, 100 + 400],
                ["index_flexor", "index_pinky_side", "index_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 350, 1000 - 450, 150 + 450],
                ["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 650, 200 + 350, 0 + 350],
                ["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 450, 100 + 400, 700 - 400],
                ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
            )
            time.sleep(sleep)

    move_hand(3)

    move_arm(1)

    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor() as executor:
        executor.submit(move_arm)
        executor.submit(move_hand)
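        # The two submissions run concurrently; this works because move_arm and
        # move_hand talk to separate buses (arm_bus vs hand_bus) on separate ports.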

    # initial position
    for _ in range(3):
        robot.hand_bus.write(
            "Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
        )
        time.sleep(1)

    # for i in range(3):
    #     robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
    #                                            100+300, 950-250, 100+250,
    #                                            100+200, 1000-300, 150+300,
    #                                            200+500, 200+200, 0+200,
    #                                            200+300, 100+200, 700-200])
    #     time.sleep(1)

    # camera = 0
    # score_threshold = 0.95
    # iou_threshold = 0.3

    # app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)

    # def frame_processor(frame: np.ndarray) -> np.ndarray:
    #     # Input Prep
    #     NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)

    #     # Run Bounding Box & Keypoint Detector
    #     batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)

    #     # The region of interest (bounding box of 4 (x, y) corners).
    #     # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
    #     # where 2 == (x, y)
    #     #
    #     # A list element will be None if there is no selected ROI.
    #     batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)

    #     # selected landmarks for the ROI (if any)
    #     # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
    #     # where K == number of landmark keypoints, 3 == (x, y, confidence)
    #     #
    #     # A list element will be None if there is no ROI.
    #     landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)

    #     app._draw_predictions(
    #         NHWC_int_numpy_frames,
    #         batched_selected_boxes,
    #         batched_selected_keypoints,
    #         batched_roi_4corners,
    #         *landmarks_out,
    #     )

    #     return NHWC_int_numpy_frames[0]

    # capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)


if __name__ == "__main__":
    main()
@@ -1,133 +0,0 @@
#!/usr/bin/env python
#
# ********* Ping Example *********
#
#
# Available SCServo model on this example : All models using Protocol SCS
# This example is tested with a SCServo(STS/SMS/SCS), and an URT
# Be sure that SCServo(STS/SMS/SCS) properties are already set as %% ID : 1 / Baudnum : 6 (Baudrate : 1000000)
#

import os

if os.name == "nt":
    import msvcrt

    def getch():
        return msvcrt.getch().decode()
else:
    import sys
    import termios
    import tty

    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)

    def getch():
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch


from scservo_sdk import *  # Uses SCServo SDK library

# Default setting
SCS_ID = 1  # SCServo ID : 1
BAUDRATE = 1000000  # SCServo default baudrate : 1000000
DEVICENAME = "/dev/tty.usbserial-2130"  # Check which port is being used on your controller
# ex) Windows: "COM1"  Linux: "/dev/ttyUSB0"  Mac: "/dev/tty.usbserial-*"

protocol_end = 1  # SCServo bit end(STS/SMS=0, SCS=1)

# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)

# Initialize PacketHandler instance
# Get methods and members of Protocol
packetHandler = PacketHandler(protocol_end)

# Open port
if portHandler.openPort():
    print("Succeeded to open the port")
else:
    print("Failed to open the port")
    print("Press any key to terminate...")
    getch()
    quit()


# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
    print("Succeeded to change the baudrate")
else:
    print("Failed to change the baudrate")
    print("Press any key to terminate...")
    getch()
    quit()

# Try to ping the SCServo
# Get SCServo model number
scs_model_number, scs_comm_result, scs_error = packetHandler.ping(portHandler, SCS_ID)
if scs_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
    print("%s" % packetHandler.getRxPacketError(scs_error))
else:
    print("[ID:%03d] ping Succeeded. SCServo model number : %d" % (SCS_ID, scs_model_number))


ADDR_SCS_PRESENT_POSITION = 56
scs_present_position, scs_comm_result, scs_error = packetHandler.read2ByteTxRx(
    portHandler, SCS_ID, ADDR_SCS_PRESENT_POSITION
)
if scs_comm_result != COMM_SUCCESS:
    print(packetHandler.getTxRxResult(scs_comm_result))
elif scs_error != 0:
    print(packetHandler.getRxPacketError(scs_error))

# breakpoint()  # leftover debug stop, commented out so the example runs unattended
scs_present_position = SCS_LOWORD(scs_present_position)
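# SCS_LOWORD is presumably retained from a 4-byte position+speed read (see the
# commented SCS_HIWORD line below); with a 2-byte read it keeps the low 16 bits
# unchanged.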
# scs_present_speed = SCS_HIWORD(scs_present_position_speed)
# print("[ID:%03d] PresPos:%03d PresSpd:%03d" % (SCS_ID, scs_present_position, SCS_TOHOST(scs_present_speed, 15)))
print("[ID:%03d] PresPos:%03d" % (SCS_ID, scs_present_position))

groupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_SCS_PRESENT_POSITION, 2)

scs_addparam_result = groupSyncRead.addParam(SCS_ID)
if not scs_addparam_result:
    print("[ID:%03d] groupSyncRead addparam failed" % SCS_ID)
    quit()

# Syncread present position
scs_comm_result = groupSyncRead.txRxPacket()
if scs_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(scs_comm_result))

# Check if groupsyncread data of SCServo#1 is available
scs_getdata_result = groupSyncRead.isAvailable(SCS_ID, ADDR_SCS_PRESENT_POSITION, 2)
if scs_getdata_result:
    # Get SCServo#1 present position value
    scs_present_position = groupSyncRead.getData(SCS_ID, ADDR_SCS_PRESENT_POSITION, 2)
else:
    scs_present_position = 0
    print("[ID:%03d] groupSyncRead getdata failed" % SCS_ID)

# # Check if groupsyncread data of SCServo#2 is available
# scs_getdata_result = groupSyncRead.isAvailable(SCS2_ID, ADDR_SCS_PRESENT_POSITION, 2)
# if scs_getdata_result == True:
#     # Get SCServo#2 present position value
#     scs2_present_position_speed = groupSyncRead.getData(SCS2_ID, ADDR_SCS_PRESENT_POSITION, 2)
# else:
#     print("[ID:%03d] groupSyncRead getdata failed" % SCS2_ID)

scs_present_position = SCS_LOWORD(scs_present_position)
print("[ID:%03d] PresPos:%03d" % (SCS_ID, scs_present_position))


# Close port
portHandler.closePort()
@@ -1,45 +0,0 @@
import serial


class HomonculusGlove:
    def __init__(self):
        self.serial_port = "/dev/tty.usbmodem1101"
        self.baud_rate = 115200
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)

    def read(self):
        while True:
            if self.serial.in_waiting > 0:
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")
                vals = [int(val) for val in vals]

                d = {
                    "thumb_0": vals[0],
                    "thumb_1": vals[1],
                    "thumb_2": vals[2],
                    "thumb_3": vals[3],
                    "index_0": vals[4],
                    "index_1": vals[5],
                    "index_2": vals[6],
                    "middle_0": vals[7],
                    "middle_1": vals[8],
                    "middle_2": vals[9],
                    "ring_0": vals[10],
                    "ring_1": vals[11],
                    "ring_2": vals[12],
                    "pinky_0": vals[13],
                    "pinky_1": vals[14],
                    "pinky_2": vals[15],
                }
                return d

    # if ser.in_waiting > 0:
    #     line = ser.readline().decode('utf-8').strip()
    #     print(line)


if __name__ == "__main__":
    glove = HomonculusGlove()
    d = glove.read()
    lol = 1
@@ -1,693 +0,0 @@
import threading
import time
from typing import Callable

import cv2
import numpy as np
import serial

# from qai_hub_models.models.mediapipe_hand.app import MediaPipeHandApp
# from qai_hub_models.models.mediapipe_hand.model import (
#     MediaPipeHand,
# )
# from qai_hub_models.utils.image_processing import (
#     app_to_net_image_inputs,
# )
from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)

LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200


class HomonculusGlove:
    def __init__(self):
        self.serial_port = "/dev/tty.usbmodem1401"
        self.baud_rate = 115200
        self.serial = serial.Serial(self.serial_port, self.baud_rate, timeout=1)
        self.last_d = {
            "thumb_0": 100,
            "thumb_1": 100,
            "thumb_2": 100,
            "thumb_3": 100,
            "index_0": 100,
            "index_1": 100,
            "index_2": 100,
            "middle_0": 100,
            "middle_1": 100,
            "middle_2": 100,
            "ring_0": 100,
            "ring_1": 100,
            "ring_2": 100,
            "pinky_0": 100,
            "pinky_1": 100,
            "pinky_2": 100,
            "battery_voltage": 100,
        }
        self.calibration = None
        # As above: state is initialized before the reader thread starts;
        # daemon=True so the read loop does not block interpreter exit.
        self.thread = threading.Thread(target=self.async_read, daemon=True)
        self.thread.start()

    @property
    def joint_names(self):
        return list(self.last_d.keys())

    def read(self, motor_names: list[str] | None = None):
        if motor_names is None:
            motor_names = self.joint_names

        values = np.array([self.last_d[k] for k in motor_names])

        print(motor_names)
        print(values)

        if self.calibration is not None:
            values = self.apply_calibration(values, motor_names)
            print(values)
        return values

    def async_read(self):
        while True:
            if self.serial.in_waiting > 0:
                self.serial.flush()
                vals = self.serial.readline().decode("utf-8").strip()
                vals = vals.split(" ")
                if len(vals) != 17:
                    continue
                vals = [int(val) for val in vals]

                d = {
                    "thumb_0": vals[0],
                    "thumb_1": vals[1],
                    "thumb_2": vals[2],
                    "thumb_3": vals[3],
                    "index_0": vals[4],
                    "index_1": vals[5],
                    "index_2": vals[6],
                    "middle_0": vals[7],
                    "middle_1": vals[8],
                    "middle_2": vals[9],
                    "ring_0": vals[10],
                    "ring_1": vals[11],
                    "ring_2": vals[12],
                    "pinky_0": vals[13],
                    "pinky_1": vals[14],
                    "pinky_2": vals[15],
                    "battery_voltage": vals[16],
                }
                self.last_d = d
                # print(d.values())

    def run_calibration(self):
        print("\nMove arm to open position")
        input("Press Enter to continue...")
        open_pos_list = []
        for _ in range(300):
            open_pos = self.read()
            open_pos_list.append(open_pos)
            time.sleep(0.01)
        open_pos = np.array(open_pos_list)
        max_open_pos = open_pos.max(axis=0)
        min_open_pos = open_pos.min(axis=0)

        print(f"{max_open_pos=}")
        print(f"{min_open_pos=}")

        print("\nMove arm to closed position")
        input("Press Enter to continue...")
        closed_pos_list = []
        for _ in range(300):
            closed_pos = self.read()
            closed_pos_list.append(closed_pos)
            time.sleep(0.01)
        closed_pos = np.array(closed_pos_list)
        max_closed_pos = closed_pos.max(axis=0)
        closed_pos[closed_pos < 1000] = 60000
        min_closed_pos = closed_pos.min(axis=0)

        print(f"{max_closed_pos=}")
        print(f"{min_closed_pos=}")

        open_pos = np.array([max_open_pos, max_closed_pos]).max(axis=0)
        closed_pos = np.array([min_open_pos, min_closed_pos]).min(axis=0)

        # Inversion: swap the open/closed extremes for joints whose raw signal
        # runs in the opposite direction.
        for i, jname in enumerate(self.joint_names):
            if jname in [
                "thumb_0",
                "thumb_3",
                "index_2",
                "middle_2",
                "ring_2",
                "pinky_0",
                "pinky_2",
                "index_0",
            ]:
                open_pos[i], closed_pos[i] = closed_pos[i], open_pos[i]

        print()
        print(f"{open_pos=}")
        print(f"{closed_pos=}")

        homing_offset = [0] * len(self.joint_names)
        drive_mode = [0] * len(self.joint_names)
        calib_modes = [CalibrationMode.LINEAR.name] * len(self.joint_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": open_pos,
            "end_pos": closed_pos,
            "calib_mode": calib_modes,
            "motor_names": self.joint_names,
        }
        # return calib_dict
        self.set_calibration(calib_dict)

    def set_calibration(self, calibration: dict[str, list]):
        self.calibration = calibration

    def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
        """Convert from the unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree
        range ]-180.0, 180.0[ with a "zero position" at 0 degrees.

        Note: we say "nominal degree range" because the motors can take values outside this range, e.g. 190 degrees
        if the motor rotates more than half a turn from the zero position. However, most motors can't rotate more
        than 180 degrees and stay within this range.

        Joint values are originally in [0, 2**32[ (unsigned int32). Each motor is expected to complete a full
        rotation when given a goal position offset by + or - its resolution. For instance, the feetech xl330-m077
        has a resolution of 4096: from any position in its original range, say 56734, it completes a full clockwise
        rotation by moving to 60830, or an anticlockwise one by moving to 52638. The position in the original range
        is arbitrary and can vary a lot from motor to motor. To harmonize between motors of the same model,
        different robots, or even models of different brands, we work in the centered nominal degree range
        ]-180, 180[.
        """
        if motor_names is None:
            motor_names = self.joint_names  # this class defines joint_names, not motor_names

        # Convert from the unsigned int32 original range [0, 2**32[ to signed float32
        values = np.asarray(values).astype(np.float32)

        for i, name in enumerate(motor_names):
            calib_idx = self.calibration["motor_names"].index(name)
            calib_mode = self.calibration["calib_mode"][calib_idx]

            if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
                start_pos = self.calibration["start_pos"][calib_idx]
                end_pos = self.calibration["end_pos"][calib_idx]

                # Rescale the present position to a nominal range [0, 100] %,
                # useful for joints with linear motions like the Aloha gripper
                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100

                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
                    if name == "pinky_1" and (values[i] < LOWER_BOUND_LINEAR):
                        values[i] = end_pos
                    else:
                        msg = (
                            f"Wrong motor position range detected for {name}. "
                            f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
                            f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
                            f"but present value is {values[i]} %. "
                            "This might be due to a cable connection issue creating an artificial jump in motor values. "
                            "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
                        )
                        print(msg)
                        # raise JointOutOfRangeError(msg)

        return values
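    # The commented-out revert_calibration below is the inverse mapping: with the
    # same hypothetical start_pos=500 and end_pos=250, a calibrated value of 50.0 %
    # maps back to 50 / 100 * (250 - 500) + 500 = 375 raw counts.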

    # def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
    #     """Inverse of `apply_calibration`."""
    #     if motor_names is None:
    #         motor_names = self.motor_names

    #     for i, name in enumerate(motor_names):
    #         calib_idx = self.calibration["motor_names"].index(name)
    #         calib_mode = self.calibration["calib_mode"][calib_idx]

    #         if CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
    #             start_pos = self.calibration["start_pos"][calib_idx]
    #             end_pos = self.calibration["end_pos"][calib_idx]

    #             # Convert from the nominal linear range of [0, 100] % to
    #             # the actual motor range of values, which can be arbitrary.
    #             values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos

    #     values = np.round(values).astype(np.int32)
    #     return values


class HopeJuniorRobot:
    def __init__(self):
        self.arm_bus = FeetechMotorsBus(
            port="/dev/tty.usbmodem58760429571",
            motors={
                # "motor1": (2, "sts3250"),
                # "motor2": (1, "scs0009"),
                "shoulder_pitch": [1, "sts3250"],
                "shoulder_yaw": [2, "sts3215"],  # TODO: sts3250
                "shoulder_roll": [3, "sts3215"],  # TODO: sts3250
                "elbow_flex": [4, "sts3250"],
                "wrist_roll": [5, "sts3215"],
                "wrist_yaw": [6, "sts3215"],
                "wrist_pitch": [7, "sts3215"],
            },
            protocol_version=0,
        )
        self.hand_bus = FeetechMotorsBus(
            port="/dev/tty.usbmodem585A0077581",
            motors={
                "thumb_basel_rotation": [30, "scs0009"],
                "thumb_flexor": [27, "scs0009"],
                "thumb_pinky_side": [26, "scs0009"],
                "thumb_thumb_side": [28, "scs0009"],
                "index_flexor": [25, "scs0009"],
                "index_pinky_side": [31, "scs0009"],
                "index_thumb_side": [32, "scs0009"],
                "middle_flexor": [24, "scs0009"],
                "middle_pinky_side": [33, "scs0009"],
                "middle_thumb_side": [34, "scs0009"],
                "ring_flexor": [21, "scs0009"],
                "ring_pinky_side": [36, "scs0009"],
                "ring_thumb_side": [35, "scs0009"],
                "pinky_flexor": [23, "scs0009"],
                "pinky_pinky_side": [38, "scs0009"],
                "pinky_thumb_side": [37, "scs0009"],
            },
            protocol_version=1,
            group_sync_read=False,
        )

    def get_hand_calibration(self):
        homing_offset = [0] * len(self.hand_bus.motor_names)
        drive_mode = [0] * len(self.hand_bus.motor_names)

        start_pos = [
            500,
            900,
            1000,
            0,
            100,
            250,
            750,
            100,
            400,
            150,
            100,
            120,
            980,
            100,
            950,
            750,
        ]

        end_pos = [
            500 - 250,
            900 - 300,
            1000 - 550,
            0 + 550,
            1000,
            start_pos[5] + 500,
            start_pos[6] - 500,
            1000,
            400 + 700,
            150 + 700,
            1000,
            120 + 700,
            980 - 700,
            1000,
            950 - 700,
            750 - 700,
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.hand_bus.motor_names,
        }
        return calib_dict

    def connect(self):
        self.arm_bus.connect()
        self.hand_bus.connect()


ESCAPE_KEY_ID = 27


def capture_and_display_processed_frames(
    frame_processor: Callable[[np.ndarray], np.ndarray],
    window_display_name: str,
    cap_device: int = 0,
) -> None:
    """
    Capture frames from the given input camera device, run them through
    the frame processor, and display the outputs in a window with the given name.

    The user should press Esc to exit.

    Inputs:
        frame_processor: Callable[[np.ndarray], np.ndarray]
            Processes frames.
            Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
        window_display_name: str
            Name of the window used to display frames.
        cap_device: int
            Identifier for the camera to use to capture frames.
    """
    cv2.namedWindow(window_display_name)
    capture = cv2.VideoCapture(cap_device)
    if not capture.isOpened():
        raise ValueError("Unable to open video capture.")

    frame_count = 0
    has_frame, frame = capture.read()
    while has_frame:
        assert isinstance(frame, np.ndarray)

        frame_count += 1
        # mirror frame: flips left-right and reverses the channel order;
        # the imshow call below reverses the channels back
        frame = np.ascontiguousarray(frame[:, ::-1, ::-1])

        # process & show frame
        processed_frame = frame_processor(frame)
        cv2.imshow(window_display_name, processed_frame[:, :, ::-1])

        has_frame, frame = capture.read()
        key = cv2.waitKey(1)
        if key == ESCAPE_KEY_ID:
            break

    capture.release()


def main():
    robot = HopeJuniorRobot()
    robot.connect()

    # robot.hand_bus.calibration = None

    # breakpoint()
    # print(robot.arm_bus.read("Present_Position"))
    robot.arm_bus.write("Torque_Enable", 1)
    robot.arm_bus.write("Acceleration", 20)
    robot.arm_bus.read("Acceleration")

    calibration = robot.get_hand_calibration()
    robot.hand_bus.write("Goal_Position", calibration["start_pos"])
    # robot.hand_bus.write("Goal_Position", calibration["end_pos"][:4], robot.hand_bus.motor_names[:4])
    robot.hand_bus.set_calibration(calibration)
    lol = 1

    # # print(motors_bus.write("Goal_Position", 500))
    # print(robot.hand_bus.read("Present_Position"))
    # # pos = hand_bus.read("Present_Position")
    # # hand_bus.write("Goal_Position", pos[0]+20, hand_bus.motor_names[0])
    # # hand_bus.write("Goal_Position", pos[i]+delta, hand_bus.motor_names[i])
    # robot.hand_bus.read("Acceleration")
    # robot.hand_bus.write("Acceleration", 10)

    # sleep = 1
    # # robot.hand_bus.write(
    # #     "Goal_Position", [glove.last_d['index_2']-1500,300,300], ["index_pinky_side", "index_flexor", "index_thumb_side"]
    # # )
    # #time.sleep(sleep)
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [100, 0, 0], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
    # )
    # time.sleep(sleep)
    # robot.hand_bus.write(
    #     "Goal_Position", [200, 100, 600], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
    # )
    # time.sleep(sleep)

    # breakpoint()

    glove = HomonculusGlove()
    glove.run_calibration()
    # while True:
    #     joint_names = ["index_1", "index_2"]
    #     joint_values = glove.read(joint_names)
    #     print(joint_values)

    input()
    while True:
        joint_names = []
        # joint_names += ["thumb_0", "thumb_2", "thumb_3"]
        joint_names += ["index_0", "index_1"]
        # joint_names += ["middle_1", "middle_2"]
        # joint_names += ["ring_1", "ring_2"]
        # joint_names += ["pinky_0", "pinky_2"]
        joint_values = glove.read(joint_names)
        joint_values = joint_values.round().astype(int)
        joint_dict = {k: v for k, v in zip(joint_names, joint_values, strict=False)}

        motor_values = []
        motor_names = []
        # motor_names += ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"]
        # motor_values += [joint_dict["thumb_3"], joint_dict["thumb_0"], joint_dict["thumb_2"], joint_dict["thumb_2"]]
        motor_names += ["index_pinky_side", "index_thumb_side"]
        # if joint_dict["index_0"] -2100 > 0:
        splayamount = 0.5
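        # Blend splay (index_0) with flexion (index_1): each side motor gets a
        # weighted mix, with splayamount=0.5 splitting the splay evenly between
        # the pinky-side and thumb-side tendons.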
        motor_values += [
            (100 - joint_dict["index_0"]) * splayamount + joint_dict["index_1"] * (1 - splayamount),
            (joint_dict["index_0"]) * splayamount + joint_dict["index_1"] * (1 - splayamount),
        ]
        # else:
        #     motor_values += [100-joint_dict["index_0"], joint_dict["index_0"]]

        # motor_names += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
        # motor_values += [joint_dict["middle_2"], joint_dict["middle_1"], joint_dict["middle_1"]]
        # motor_names += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
        # motor_values += [joint_dict["ring_2"], joint_dict["ring_1"], joint_dict["ring_1"]]
        # motor_names += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]

        # motor_values += [joint_dict["pinky_2"], joint_dict["pinky_0"], joint_dict["pinky_0"]]

        motor_values = np.array(motor_values)
        motor_values = np.clip(motor_values, 0, 100)

        robot.hand_bus.write("Goal_Position", motor_values, motor_names)
        time.sleep(0.02)

    while True:
        # print(glove.read()['index_2']-1500)
        glove_index_flexor = glove.read()["index_2"] - 1500
        glove_index_subflexor = glove.read()["index_1"] - 1500
        glove_index_side = glove.read()["index_0"] - 2100

        vals = [glove_index_flexor, 1000 - (glove_index_subflexor), glove_index_subflexor]

        keys = ["index_flexor", "index_pinky_side", "index_thumb_side"]

        glove_middle_flexor = glove.read()["middle_2"] - 1500
        glove_middle_subflexor = 1000 - (glove.read()["middle_1"] - 1700)
        vals += [glove_middle_flexor, glove_middle_subflexor, glove_middle_subflexor - 200]
        keys += ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]

        glove_ring_flexor = glove.read()["ring_2"] - 1300
        print(glove_ring_flexor)
        glove_ring_subflexor = glove.read()["ring_1"] - 1100

        vals += [glove_ring_flexor, 1000 - glove_ring_subflexor, glove_ring_subflexor]
        keys += ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]

        glove_pinky_flexor = glove.read()["pinky_2"] - 1500
        glove_pinky_subflexor = glove.read()["pinky_1"] - 1300
        vals += [300 + glove_pinky_flexor, max(1000 - glove_pinky_subflexor - 100, 0), glove_pinky_subflexor]
        keys += ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]

        robot.hand_bus.write("Goal_Position", vals, keys)
        time.sleep(0.1)

    time.sleep(3)

    def move_arm(loop=10):
        sleep = 1
        for _ in range(loop):
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1195])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 2195])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1457, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 2357, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 974, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 2674, 1957, 1695])
            time.sleep(sleep + 2)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 1632, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 1369, 1632, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 1330, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [2381, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1681, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)
            robot.arm_bus.write("Goal_Position", [1981, 2030, 2069, 2032, 1874, 1957, 1695])
            time.sleep(sleep)

    def move_hand(loop=10):
        sleep = 0.5
        for _ in range(loop):
            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000, 0, 1000],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 100, 100], ["index_flexor", "index_pinky_side", "index_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [100, 1000, 150], ["middle_flexor", "middle_pinky_side", "middle_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 200, 0], ["ring_flexor", "ring_pinky_side", "ring_thumb_side"]
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position", [200, 100, 700], ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"]
            )
            time.sleep(sleep)

            robot.hand_bus.write(
                "Goal_Position",
                [500, 1000 - 250, 0 + 300, 1000 - 200],
                ["thumb_basel_rotation", "thumb_flexor", "thumb_pinky_side", "thumb_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 450, 100 + 400, 100 + 400],
                ["index_flexor", "index_pinky_side", "index_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [100 + 350, 1000 - 450, 150 + 450],
                ["middle_flexor", "middle_pinky_side", "middle_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 650, 200 + 350, 0 + 350],
                ["ring_flexor", "ring_pinky_side", "ring_thumb_side"],
            )
            time.sleep(sleep)
            robot.hand_bus.write(
                "Goal_Position",
                [200 + 450, 100 + 400, 700 - 400],
                ["pinky_flexor", "pinky_pinky_side", "pinky_thumb_side"],
            )
            time.sleep(sleep)

    move_hand(3)

    move_arm(1)

    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor() as executor:
        executor.submit(move_arm)
        executor.submit(move_hand)

    # initial position
    for _ in range(3):
        robot.hand_bus.write(
            "Goal_Position", [500, 1000, 0, 1000, 100, 950, 100, 100, 1000, 150, 200, 200, 0, 200, 100, 700]
        )
        time.sleep(1)

    # for i in range(3):
    #     robot.hand_bus.write("Goal_Position", [500, 1000-150, 0+250, 1000-150,
    #                                            100+300, 950-250, 100+250,
    #                                            100+200, 1000-300, 150+300,
    #                                            200+500, 200+200, 0+200,
    #                                            200+300, 100+200, 700-200])
    #     time.sleep(1)

    # camera = 0
    # score_threshold = 0.95
    # iou_threshold = 0.3

    # app = MediaPipeHandApp(MediaPipeHand.from_pretrained(), score_threshold, iou_threshold)

    # def frame_processor(frame: np.ndarray) -> np.ndarray:
    #     # Input Prep
    #     NHWC_int_numpy_frames, NCHW_fp32_torch_frames = app_to_net_image_inputs(frame)

    #     # Run Bounding Box & Keypoint Detector
    #     batched_selected_boxes, batched_selected_keypoints = app._run_box_detector(NCHW_fp32_torch_frames)

    #     # The region of interest (bounding box of 4 (x, y) corners).
    #     # list[torch.Tensor(shape=[Num Boxes, 4, 2])],
    #     # where 2 == (x, y)
    #     #
    #     # A list element will be None if there is no selected ROI.
    #     batched_roi_4corners = app._compute_object_roi(batched_selected_boxes, batched_selected_keypoints)

    #     # selected landmarks for the ROI (if any)
    #     # list[torch.Tensor(shape=[Num Selected Landmarks, K, 3])],
    #     # where K == number of landmark keypoints, 3 == (x, y, confidence)
    #     #
    #     # A list element will be None if there is no ROI.
    #     landmarks_out = app._run_landmark_detector(NHWC_int_numpy_frames, batched_roi_4corners)

    #     app._draw_predictions(
    #         NHWC_int_numpy_frames,
    #         batched_selected_boxes,
    #         batched_selected_keypoints,
    #         batched_roi_4corners,
    #         *landmarks_out,
    #     )

    #     return NHWC_int_numpy_frames[0]

    # capture_and_display_processed_frames(frame_processor, "QAIHM Mediapipe Hand Demo", camera)


if __name__ == "__main__":
    main()
@@ -1,97 +0,0 @@
# faulty servo
Model = [777]
ID = [7]
Baud_Rate = [0]
Return_Delay = [0]
Response_Status_Level = [1]
Min_Angle_Limit = [1140]
Max_Angle_Limit = [2750]
Max_Temperature_Limit = [70]
Max_Voltage_Limit = [140]
Min_Voltage_Limit = [40]
Max_Torque_Limit = [1000]
Phase = [12]
Unloading_Condition = [44]
LED_Alarm_Condition = [47]
P_Coefficient = [32]
D_Coefficient = [32]
I_Coefficient = [0]
Minimum_Startup_Force = [16]
CW_Dead_Zone = [1]
CCW_Dead_Zone = [1]
Protection_Current = [310]
Angular_Resolution = [1]
Offset = [1047]
Mode = [0]
Protective_Torque = [20]
Protection_Time = [200]
Overload_Torque = [80]
Speed_closed_loop_P_proportional_coefficient = [10]
Over_Current_Protection_Time = [200]
Velocity_closed_loop_I_integral_coefficient = [200]
Torque_Enable = [1]
Acceleration = [20]
Goal_Position = [0]
Goal_Time = [0]
Goal_Speed = [0]
Torque_Limit = [1000]
Lock = [1]
Present_Position = [1494]
Present_Speed = [0]
Present_Load = [0]
Present_Voltage = [123]
Present_Temperature = [24]
Status = [0]
Moving = [0]
Present_Current = [0]
Maximum_Acceleration = [306]


# all servos of hopejr
Model = [2825 777 777 2825 777 777 777]
ID = [1 2 3 4 5 6 7]
Baud_Rate = [0 0 0 0 0 0 0]
Return_Delay = [0 0 0 0 0 0 0]
Response_Status_Level = [1 1 1 1 1 1 1]
Min_Angle_Limit = [ 650 1300 1300 1200 600 1725 0]
Max_Angle_Limit = [2600 2050 2800 2500 4096 2250 4095]
Max_Temperature_Limit = [80 70 70 80 70 70 70]
Max_Voltage_Limit = [160 140 140 160 140 140 80]
Min_Voltage_Limit = [60 40 40 60 40 40 40]
Max_Torque_Limit = [1000 1000 1000 1000 1000 1000 1000]
Phase = [12 12 12 12 12 12 12]
Unloading_Condition = [45 44 44 45 44 44 44]
LED_Alarm_Condition = [45 47 47 45 47 47 47]
P_Coefficient = [32 32 32 32 32 32 32]
D_Coefficient = [32 32 32 32 32 32 32]
I_Coefficient = [0 0 0 0 0 0 0]
Minimum_Startup_Force = [15 16 16 12 16 16 16]
CW_Dead_Zone = [0 1 1 0 1 1 1]
CCW_Dead_Zone = [0 1 1 0 1 1 1]
Protection_Current = [310 310 310 310 310 310 500]
Angular_Resolution = [1 1 1 1 1 1 1]
Offset = [ 0 1047 1024 1047 1024 1024 0]
Mode = [0 0 0 0 0 0 0]
Protective_Torque = [20 20 20 20 20 20 20]
Protection_Time = [200 200 200 200 200 200 200]
Overload_Torque = [80 80 80 80 80 80 80]
Speed_closed_loop_P_proportional_coefficient = [10 10 10 10 10 10 10]
Over_Current_Protection_Time = [250 200 200 250 200 200 200]
Velocity_closed_loop_I_integral_coefficient = [200 200 200 200 200 200 200]
Torque_Enable = [1 1 1 1 1 1 1]
Acceleration = [20 20 20 20 20 20 20]
Goal_Position = [1909 1977 1820 1000 707 1941 1036]
Goal_Time = [0 0 0 0 0 0 0]
Goal_Speed = [0 0 0 0 0 0 0]
Torque_Limit = [1000 1000 1000 200 1000 1000 1000]
Lock = [1 1 1 1 1 1 1]
Present_Position = [1909 1982 1821 1200 710 1941 1036]
Present_Speed = [0 0 0 0 0 0 0]
Present_Load = [ 0 48 0 0 32 0 0]
Present_Voltage = [122 123 122 123 122 122 122]
Present_Temperature = [23 28 28 26 29 28 28]
Status = [0 0 0 0 0 0 1]
Moving = [0 0 0 0 0 0 0]
Present_Current = [0 1 0 1 1 0 1]
Maximum_Acceleration = [1530 306 306 1530 306 306 254]
|
||||
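
# Note: in the full dump the seventh column (ID 7, Model 777) is the only one
# reporting Status = 1; this matches the "# faulty servo" dump above, which
# was taken from ID 7.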
@@ -1,192 +0,0 @@
import threading
import time
from typing import Callable

import cv2
import numpy as np
import serial

from lerobot.common.robot_devices.motors.feetech import (
    CalibrationMode,
    FeetechMotorsBus,
)

LOWER_BOUND_LINEAR = -100
UPPER_BOUND_LINEAR = 200

ESCAPE_KEY_ID = 27


class HopeJuniorRobot:
    def __init__(self):
        self.arm_bus = FeetechMotorsBus(
            port="/dev/ttyACM1",
            motors={
                # "motor1": (2, "sts3250"),
                # "motor2": (1, "scs0009"),
                # "shoulder_pitch": [1, "sts3250"],
                # "shoulder_yaw": [2, "sts3215"],  # TODO: sts3250
                # "shoulder_roll": [3, "sts3215"],  # TODO: sts3250
                # "elbow_flex": [4, "sts3250"],
                # "wrist_roll": [5, "sts3215"],
                # "wrist_yaw": [6, "sts3215"],
                "wrist_pitch": [7, "sts3215"],
            },
            protocol_version=0,
        )
        self.hand_bus = FeetechMotorsBus(
            port="/dev/ttyACM1",
            motors={
                "thumb_basel_rotation": [30, "scs0009"],
                "thumb_flexor": [27, "scs0009"],
                "thumb_pinky_side": [26, "scs0009"],
                "thumb_thumb_side": [28, "scs0009"],
                "index_flexor": [25, "scs0009"],
                "index_pinky_side": [31, "scs0009"],
                "index_thumb_side": [32, "scs0009"],
                "middle_flexor": [24, "scs0009"],
                "middle_pinky_side": [33, "scs0009"],
                "middle_thumb_side": [34, "scs0009"],
                "ring_flexor": [21, "scs0009"],
                "ring_pinky_side": [36, "scs0009"],
                "ring_thumb_side": [35, "scs0009"],
                "pinky_flexor": [23, "scs0009"],
                "pinky_pinky_side": [38, "scs0009"],
                "pinky_thumb_side": [37, "scs0009"],
            },
            protocol_version=1,
            group_sync_read=False,
        )

    def get_hand_calibration(self):
        """
        Returns a dictionary containing calibration settings for each motor
        on the hand bus.
        """
        homing_offset = [0] * len(self.hand_bus.motor_names)
        drive_mode = [0] * len(self.hand_bus.motor_names)

        start_pos = [
            500, 900, 0, 1000, 100, 250, 750, 100, 400, 150, 100, 120, 980, 100, 950, 750,
        ]

        end_pos = [
            start_pos[0] - 400,  # 500 - 400 = 100
            start_pos[1] - 300,  # 900 - 300 = 600
            start_pos[2] + 550,  # 0 + 550 = 550
            start_pos[3] - 550,  # 1000 - 550 = 450
            start_pos[4] + 900,  # 100 + 900 = 1000
            start_pos[5] + 500,  # 250 + 500 = 750
            start_pos[6] - 500,  # 750 - 500 = 250
            start_pos[7] + 900,  # 100 + 900 = 1000
            start_pos[8] + 700,  # 400 + 700 = 1100
            start_pos[9] + 700,  # 150 + 700 = 850
            start_pos[10] + 900,  # 100 + 900 = 1000
            start_pos[11] + 700,  # 120 + 700 = 820
            start_pos[12] - 700,  # 980 - 700 = 280
            start_pos[13] + 900,  # 100 + 900 = 1000
            start_pos[14] - 700,  # 950 - 700 = 250
            start_pos[15] - 700,  # 750 - 700 = 50
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.hand_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.hand_bus.motor_names,
        }
        return calib_dict
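
    # A minimal sketch of what LINEAR calibration implies for start_pos/end_pos
    # (an assumption about the driver: CalibrationMode.LINEAR suggests raw ticks
    # in [start_pos, end_pos] map linearly onto a nominal 0-100 range; the exact
    # formula lives in lerobot's feetech driver, not here):
    #
    #     def raw_to_percent(raw, start, end):
    #         return (raw - start) / (end - start) * 100
    #
    # Raw values slightly outside the range then land outside 0-100, which is
    # the headroom LOWER_BOUND_LINEAR / UPPER_BOUND_LINEAR above allow for.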

    def get_arm_calibration(self):
        """
        Returns a dictionary containing calibration settings for each motor
        on the arm bus.
        """
        homing_offset = [0] * len(self.arm_bus.motor_names)
        drive_mode = [0] * len(self.arm_bus.motor_names)

        # Example placeholders
        start_pos = [
            600,  # shoulder_up
            1500,  # shoulder_forward
            1300,  # shoulder_yaw
            1000,  # bend_elbow
            1600,  # wrist_roll
            1700,  # wrist_yaw
            600,  # wrist_pitch
        ]

        end_pos = [
            2300,  # shoulder_up
            2300,  # shoulder_forward
            2800,  # shoulder_yaw
            2500,  # bend_elbow
            2800,  # wrist_roll
            2200,  # wrist_yaw
            1700,  # wrist_pitch
        ]

        calib_modes = [CalibrationMode.LINEAR.name] * len(self.arm_bus.motor_names)

        calib_dict = {
            "homing_offset": homing_offset,
            "drive_mode": drive_mode,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "calib_mode": calib_modes,
            "motor_names": self.arm_bus.motor_names,
        }
        return calib_dict

    def connect(self):
        """Connect to the Feetech buses."""
        self.arm_bus.connect()
        # self.hand_bus.connect()


def capture_and_display_processed_frames(
    frame_processor: Callable[[np.ndarray], np.ndarray],
    window_display_name: str,
    cap_device: int = 0,
) -> None:
    """
    Capture frames from the given input camera device, run them through
    the frame processor, and display the outputs in a window with the given name.

    User should press Esc to exit.

    Inputs:
        frame_processor: Callable[[np.ndarray], np.ndarray]
            Processes frames.
            Input and output are numpy arrays of shape (H W C) with BGR channel layout and dtype uint8 / byte.
        window_display_name: str
            Name of the window used to display frames.
        cap_device: int
            Identifier for the camera to use to capture frames.
    """
    cv2.namedWindow(window_display_name)
    capture = cv2.VideoCapture(cap_device)
    if not capture.isOpened():
        raise ValueError("Unable to open video capture.")

    frame_count = 0
    has_frame, frame = capture.read()
    while has_frame:
        frame_count = frame_count + 1
        # Mirror frame horizontally and flip color for demonstration
        frame = np.ascontiguousarray(frame[:, ::-1, ::-1])

        # process & show frame
        processed_frame = frame_processor(frame)
        cv2.imshow(window_display_name, processed_frame[:, :, ::-1])

        has_frame, frame = capture.read()
        key = cv2.waitKey(1)
        if key == ESCAPE_KEY_ID:
            break

    capture.release()
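
A minimal usage sketch for the capture loop above (hypothetical: an identity
processor on camera 0; any Callable[[np.ndarray], np.ndarray] plugs in the
same way):

    capture_and_display_processed_frames(lambda f: f, "Raw Feed", 0)

The commented-out MediaPipe demo earlier in this diff wires a hand-detector
frame_processor into this same entry point.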
Before Width: | Height: | Size: 56 KiB |
Before Width: | Height: | Size: 54 KiB |
Before Width: | Height: | Size: 56 KiB |
@@ -1,44 +0,0 @@
import time
from typing import List, Tuple

import matplotlib.pyplot as plt


def log_and_plot_params(
    bus,
    params_to_log: list,
    servo_names: list,
    test_id="servo_log",
    interval=0.1,
    duration=5,
    save_plot=True,
) -> Tuple[dict, List[float]]:
    """
    Logs specific servo parameters for a given duration and generates a plot.
    """
    servo_data = {servo_name: {param: [] for param in params_to_log} for servo_name in servo_names}
    timestamps = []

    start_time = time.time()

    while time.time() - start_time < duration:
        timestamp = time.time() - start_time
        timestamps.append(timestamp)
        for param in params_to_log:
            values = bus.read(param, servo_names)
            for servo_name, value in zip(servo_names, values):
                servo_data[servo_name][param].append(value)

        time.sleep(interval)

    if save_plot:
        for servo_name, data in servo_data.items():
            plt.figure(figsize=(10, 6))
            for param in params_to_log:
                if all(v is not None for v in data[param]):
                    plt.plot(timestamps, data[param], label=param)
            plt.xlabel("Time (s)")
            plt.ylabel("Parameter Values")
            plt.title(f"Parameter Trends for Servo: {servo_name}")
            plt.legend()
            plt.grid(True)
            plt.tight_layout()
            plot_filename = f"{test_id}_{servo_name}.png"
            plt.savefig(plot_filename)
            print(f"Plot saved as {plot_filename}")

    return servo_data, timestamps
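
A minimal usage sketch (hypothetical call; the parameter and motor names are
taken from the dumps and bus definitions earlier in this diff):

    data, ts = log_and_plot_params(
        robot.hand_bus,
        params_to_log=["Present_Position", "Present_Load"],
        servo_names=["index_flexor"],
        test_id="index_test",
        duration=10,
    )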
Before Width: | Height: | Size: 65 KiB |
Before Width: | Height: | Size: 60 KiB |
Before Width: | Height: | Size: 54 KiB |
@@ -1,68 +0,0 @@
STS_SERIES_CONTROL_TABLE = {
    "Model": (3, 2),
    "ID": (5, 1),
    "Baud_Rate": (6, 1),
    "Return_Delay": (7, 1),
    "Response_Status_Level": (8, 1),
    "Min_Angle_Limit": (9, 2),
    "Max_Angle_Limit": (11, 2),
    "Max_Temperature_Limit": (13, 1),
    "Max_Voltage_Limit": (14, 1),
    "Min_Voltage_Limit": (15, 1),
    "Max_Torque_Limit": (16, 2),
    "Phase": (18, 1),
    "Unloading_Condition": (19, 1),
    "LED_Alarm_Condition": (20, 1),
    "P_Coefficient": (21, 1),
    "D_Coefficient": (22, 1),
    "I_Coefficient": (23, 1),
    "Minimum_Startup_Force": (24, 2),
    "CW_Dead_Zone": (26, 1),
    "CCW_Dead_Zone": (27, 1),
    "Protection_Current": (28, 2),
    "Angular_Resolution": (30, 1),
    "Offset": (31, 2),
    "Mode": (33, 1),
    "Protective_Torque": (34, 1),
    "Protection_Time": (35, 1),
    "Overload_Torque": (36, 1),
    "Speed_closed_loop_P_proportional_coefficient": (37, 1),
    "Over_Current_Protection_Time": (38, 1),
    "Velocity_closed_loop_I_integral_coefficient": (39, 1),
    "Torque_Enable": (40, 1),
    "Acceleration": (41, 1),
    "Goal_Position": (42, 2),
    "Goal_Time": (44, 2),
    "Goal_Speed": (46, 2),
    "Torque_Limit": (48, 2),
    "Lock": (55, 1),
    "Present_Position": (56, 2),
    "Present_Speed": (58, 2),
    "Present_Load": (60, 2),
    "Present_Voltage": (62, 1),
    "Present_Temperature": (63, 1),
    "Status": (65, 1),
    "Moving": (66, 1),
    "Present_Current": (69, 2),
    # Not in the Memory Table
    "Maximum_Acceleration": (85, 2),
}


def print_all_params(robot):
    """
    Reads all parameters from the STS_SERIES_CONTROL_TABLE and prints their values.
    """
    for param in STS_SERIES_CONTROL_TABLE:
        try:
            val = robot.arm_bus.read(param)
            print(f"{param} = {val}")
        except Exception as e:
            print(f"{param} read failed: {e}")


# Example usage (requires a connected HopeJuniorRobot instance):
# robot = HopeJuniorRobot()
# robot.connect()
# print_all_params(robot)
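
Each control-table entry is (register_address, size_in_bytes). A sketch of
what that means for a raw read (illustrative only; the bus driver resolves
parameter names through this same table internally):

    addr, size = STS_SERIES_CONTROL_TABLE["Present_Position"]  # -> (56, 2)
    # A 2-byte value spans registers addr and addr + 1; byte order differs
    # between Feetech series, which is why reads go through the driver.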
@@ -1,26 +0,0 @@
#include <DFRobot_VisualRotaryEncoder.h>

DFRobot_VisualRotaryEncoder_I2C sensor(0x54, &Wire);

void setup()
{
  Serial.begin(115200);

  // Attempt to initialize the sensor
  while (NO_ERR != sensor.begin()) {
    // Failed? Just wait a bit and try again
    delay(3000);
  }
}

void loop()
{
  // Read the encoder value
  uint16_t encoderValue = sensor.getEncoderValue();

  // Print it followed by a newline
  Serial.println(encoderValue);

  // Delay 10 ms between readings
  delay(10);
}
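
On the host side this stream can be consumed with pyserial, which the deleted
Python file above already imports. A minimal sketch (the port name is a
hypothetical example; the baud rate matches the 115200 set in setup()):

    import serial

    with serial.Serial("/dev/ttyACM0", 115200, timeout=1) as port:
        line = port.readline().strip()
        if line:
            encoder_value = int(line)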