Traceback (most recent call last):
  File "train.py", line 43, in <module>
    train(args)
  File "train.py", line 38, in train
    ppo_runner, train_cfg = task_registry.make_alg_runner(env=env, name=args.task, args=args)
  File "/home/yyds/桌面/Gym5_human/humanoid-gym-main/humanoid/utils/task_registry.py", line 152, in make_alg_runner
    runner = runner_class(env, all_cfg, log_dir, device=args.rl_device)
  File "/home/yyds/桌面/Gym5_human/humanoid-gym-main/humanoid/algo/ppo/on_policy_runner.py", line 91, in __init__
    _, _ = self.env.reset()
  File "/home/yyds/桌面/Gym5_human/humanoid-gym-main/humanoid/envs/base/legged_robot.py", line 115, in reset
    obs, privileged_obs, _, _, _ = self.step(torch.zeros(
  File "/home/yyds/桌面/Gym5_human/humanoid-gym-main/humanoid/envs/custom/humanoid_env.py", line 197, in step
    return super().step(actions)
  File "/home/yyds/桌面/Gym5_human/humanoid-gym-main/humanoid/envs/base/legged_robot.py", line 102, in step
    self.post_physics_step()
  File "/home/yyds/桌面/Gym5_human/humanoid-gym-main/humanoid/envs/base/legged_robot.py", line 142, in post_physics_step
    self.compute_reward()
  File "/home/yyds/桌面/Gym5_human/humanoid-gym-main/humanoid/envs/base/legged_robot.py", line 226, in compute_reward
    rew = self.reward_functions[i]() * self.reward_scales[name]
  File "/home/yyds/桌面/Gym5_human/humanoid-gym-main/humanoid/envs/custom/humanoid_env.py", line 343, in _reward_feet_contact_number
    reward = torch.where(contact == stance_mask, 1, -0.3)
RuntimeError: expected scalar type long int but found float
####################################################
It seems that something is wrong with the torch.where call; can you help me solve it?
Have you changed the definitions of contact or stance_mask? Does this happen with the default settings? Try this line: reward = torch.where(contact == stance_mask, 1.0, -0.3). Thank you for your feedback.
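For context, here is a minimal sketch of the dtype mismatch and the suggested fix. The tensor shapes and values are made up purely for illustration; only the torch.where call mirrors the line from _reward_feet_contact_number.

import torch

# Hypothetical stand-ins for the env's contact and stance mask tensors,
# assumed here to be integer/long as the error message suggests.
contact = torch.tensor([[1, 0], [0, 1]], dtype=torch.long)
stance_mask = torch.tensor([[1, 1], [0, 0]], dtype=torch.long)

# On some PyTorch versions this raises
# "RuntimeError: expected scalar type long int but found float",
# because 1 is treated as a long scalar while -0.3 is a float:
# reward = torch.where(contact == stance_mask, 1, -0.3)

# Writing both branches as floats gives a consistent dtype and works:
reward = torch.where(contact == stance_mask, 1.0, -0.3)
print(reward)  # tensor([[ 1.0000, -0.3000], [ 1.0000, -0.3000]])

The change from 1 to 1.0 simply makes both branches of torch.where the same scalar type, so the result is an ordinary float reward tensor; newer PyTorch releases promote mixed scalars automatically, which is likely why the issue only shows up on some installations.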