works well for brush
NishanthJKumar committed Jul 25, 2023
1 parent cbb8b81 commit d7ea978
Showing 4 changed files with 92 additions and 29 deletions.
@@ -0,0 +1,69 @@
{
    "objects": {
        "tool_room_table": "flat_surface",
        "brush": "tool",
        "hex_screwdriver": "tool",
        "floor": "floor",
        "toolbag": "bag",
        "low_wall_rack": "flat_surface",
        "extra_room_table": "flat_surface",
        "spot": "robot"
    },
    "init": {
        "tool_room_table": {
            "x": 6.636010845153006,
            "y": -6.357507003722243,
            "z": 0.16688691892637877
        },
        "brush": {
            "x": 6.628450640793668,
            "y": -6.134070805279482,
            "z": 0.275153814024837,
            "lost": 0.0,
            "in_view": 1.0
        },
        "hex_screwdriver": {
            "x": 6.743202704838895,
            "y": -6.072205625637715,
            "z": 0.3397934340691954,
            "lost": 0.0,
            "in_view": 1.0
        },
        "floor": {
            "x": 0.0,
            "y": 0.0,
            "z": -1.0
        },
        "toolbag": {
            "x": 7.574526143145762,
            "y": -8.228299049853604,
            "z": -0.23102625581230768
        },
        "low_wall_rack": {
            "x": 9.861474481688553,
            "y": -6.941164583352312,
            "z": 0.211171693916895
        },
        "extra_room_table": {
            "x": 8.248756607856741,
            "y": -6.239555414972237,
            "z": -0.04317698991113854
        },
        "spot": {
            "gripper_open_percentage": 1.925736665725708,
            "curr_held_item_id": 0,
            "x": 8.340683958991244,
            "y": -6.996395561153911,
            "z": 0.13309655644432755,
            "yaw": 0.018622984511019496
        }
    },
    "goal": {
        "InBag": [
            [
                "brush",
                "toolbag"
            ]
        ]
    }
}
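
The JSON above is a new task specification for the Spot environment: "objects" maps each object name to its type, "init" gives each object's initial features (pose, plus "lost"/"in_view" flags for the tools), and "goal" lists ground atoms, here a single InBag(brush, toolbag). A minimal, self-contained sketch of how such a file could be parsed — a hypothetical loader, not the repository's actual task-loading code:

import json
from typing import Dict, List, Tuple

def load_task_spec(path: str):
    """Parse a Spot task JSON into (object types, initial state, goal atoms)."""
    with open(path, encoding="utf-8") as f:
        spec = json.load(f)
    object_types: Dict[str, str] = spec["objects"]        # e.g. {"brush": "tool", ...}
    init_state: Dict[str, Dict[str, float]] = spec["init"]
    # Flatten {"InBag": [["brush", "toolbag"]]} into [("InBag", ["brush", "toolbag"])].
    goal_atoms: List[Tuple[str, List[str]]] = [
        (predicate, args)
        for predicate, groundings in spec["goal"].items()
        for args in groundings
    ]
    return object_types, init_state, goal_atoms

# Example (with the file above saved as, say, task.json -- hypothetical filename):
# types, init, goal = load_task_spec("task.json")
# goal == [("InBag", ["brush", "toolbag"])]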
11 changes: 6 additions & 5 deletions predicators/envs/spot_env.py
@@ -1025,8 +1025,8 @@ def _generate_task_goal(self) -> Set[GroundAtom]:
cube = self._obj_name_to_obj("cube")
extra_table = self._obj_name_to_obj("extra_room_table")
return {GroundAtom(self._On, [cube, extra_table])}
hammer = self._obj_name_to_obj("hammer")
hex_key = self._obj_name_to_obj("hex_key")
# hammer = self._obj_name_to_obj("hammer")
# hex_key = self._obj_name_to_obj("hex_key")
brush = self._obj_name_to_obj("brush")
hex_screwdriver = self._obj_name_to_obj("hex_screwdriver")
bag = self._obj_name_to_obj("toolbag")
@@ -1044,11 +1044,12 @@ def _make_object_name_to_obj_dict(self) -> Dict[str, Object]:
cube = Object("cube", self._tool_type)
objects.append(cube)
else:
hammer = Object("hammer", self._tool_type)
hex_key = Object("hex_key", self._tool_type)
# hammer = Object("hammer", self._tool_type)
# hex_key = Object("hex_key", self._tool_type)
hex_screwdriver = Object("hex_screwdriver", self._tool_type)
brush = Object("brush", self._tool_type)
objects.extend([hammer, hex_key, hex_screwdriver, brush])
# objects.extend([hammer, hex_key, hex_screwdriver, brush])
objects.extend([hex_screwdriver, brush])
spot = Object("spot", self._robot_type)
tool_room_table = Object("tool_room_table", self._surface_type)
extra_room_table = Object("extra_room_table", self._surface_type)
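
After this change, only hex_screwdriver and brush are constructed as tools, matching the objects declared in the new task file above. A quick consistency check, as a sketch (it assumes the truncated remainder of _make_object_name_to_obj_dict still creates the floor, low_wall_rack, and toolbag objects, which all appear in the JSON task):

# Object names the environment should now produce for the non-cube tasks
# (assumption: the part of the method cut off below this diff still adds
# floor, low_wall_rack, and toolbag).
env_object_names = {
    "spot", "tool_room_table", "extra_room_table", "low_wall_rack",
    "floor", "toolbag", "hex_screwdriver", "brush",
}

# Object names declared in the JSON task spec at the top of this commit.
task_object_names = {
    "tool_room_table", "brush", "hex_screwdriver", "floor", "toolbag",
    "low_wall_rack", "extra_room_table", "spot",
}

assert task_object_names == env_object_names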
33 changes: 15 additions & 18 deletions predicators/spot_utils/perception_utils.py
@@ -325,34 +325,30 @@ def get_xyz_from_depth(image_response: bosdyn.api.image_pb2.ImageResponse,


def get_pixel_locations_with_detic_sam(
classes: List[str],
obj_class: str,
in_res_image: Dict[str, np.ndarray],
plot: bool = False) -> List[Tuple[float, float]]:
"""Method to get the pixel locations of specific objects with class names
listed in 'classes' within an input image."""
res_segment = query_detic_sam(image_in=in_res_image['rgb'],
classes=classes,
classes=[obj_class],
viz=plot)
# return: 'masks', 'boxes', 'classes'
if res_segment is None:
if len(res_segment['classes']) == 0:
return []

obj_num = len(res_segment['masks'])
assert obj_num == 1

assert res_segment['classes'].count(obj_class) == 1
pixel_locations = []

# Detect multiple objects with their masks
for i in range(obj_num):
# Compute geometric center of object bounding box
x1, y1, x2, y2 = res_segment['boxes'][i]
x_c = (x1 + x2) / 2
y_c = (y1 + y2) / 2
# Plot center and segmentation mask
if plot:
plt.imshow(res_segment['masks'][i][0])
plt.show()
pixel_locations.append((x_c, y_c))
# Compute geometric center of object bounding box
x1, y1, x2, y2 = res_segment['boxes'][0].squeeze()
x_c = (x1 + x2) / 2
y_c = (y1 + y2) / 2
# Plot center and segmentation mask
if plot:
plt.imshow(res_segment['masks'][0][0].squeeze())
plt.show()
pixel_locations.append((x_c, y_c))

return pixel_locations

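
The refactor narrows get_pixel_locations_with_detic_sam from a list of class prompts to a single obj_class: it now returns an empty list when DETIC-SAM detects nothing, asserts that exactly one detection matches the prompt, and returns that detection's bounding-box center. A hedged usage sketch (the prompt string and dummy image are placeholders, and running it requires the DETIC-SAM model service the repository queries):

import numpy as np
from predicators.spot_utils.perception_utils import get_pixel_locations_with_detic_sam

rgb = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for a camera frame
pixels = get_pixel_locations_with_detic_sam(
    obj_class="yellow brush",        # one text prompt instead of a list
    in_res_image={"rgb": rgb},
    plot=False)

# pixels is either [] (prompt not detected) or [(x_center, y_center)];
# more than one match for the prompt now trips the assertion.
if pixels:
    x_c, y_c = pixels[0]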
@@ -464,6 +460,7 @@ def get_object_locations_with_detic_sam(
depth_value=depth_median,
point_x=x_c_rotated,
point_y=y_c_rotated)
ret_obj_positions[obj_class.item()] = (x0, y0, z0)
if x0 != float('nan') and y0 != float("nan") and z0 != float("nan"):
ret_obj_positions[obj_class.item()] = (x0, y0, z0)

return ret_obj_positions
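
One caveat with the added guard: NaN compares unequal to everything in Python, including itself, so x0 != float('nan') is always True and the condition is always satisfied, meaning NaN positions are still recorded. A guard that actually skips NaN coordinates would look something like the sketch below (not part of this commit):

import math

# Only record the detection when all three coordinates are real numbers.
if not (math.isnan(x0) or math.isnan(y0) or math.isnan(z0)):
    ret_obj_positions[obj_class.item()] = (x0, y0, z0)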
8 changes: 2 additions & 6 deletions predicators/spot_utils/spot_utils.py
@@ -516,7 +516,7 @@ def get_sam_object_loc_from_camera(
transformed_location_dict[obj_class] = object_rt_gn_origin

# Use the input class name as the identifier for object(s) and
# their positions
# their positions.
return transformed_location_dict

def convert_obj_location(
@@ -948,12 +948,8 @@ def arm_object_grasp(self, obj: Object) -> None:
'rgb': process_image_response(image_responses[0]),
'depth': process_image_response(image_responses[1]),
}
# NOTE: we now hard-code the 'yellow brush' to be a
# stand-in for the cube, which is quite a hack.
# We will remove this and do correct object classing
# in a future PR
results = get_pixel_locations_with_detic_sam(
classes=[obj_name_to_vision_prompt['brush']],
obj_class=obj_name_to_vision_prompt[obj.name],
in_res_image=image_for_sam,
plot=CFG.spot_visualize_vision_model_outputs)

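
With the hard-coded 'yellow brush' prompt gone, the grasp routine looks up a vision prompt for whichever object it is asked to grasp, so any object with an entry in obj_name_to_vision_prompt can be targeted. A minimal sketch of that lookup (the prompt strings below are invented for illustration; the real mapping is defined elsewhere in the repository):

# Hypothetical prompt table; the repository defines the real one.
obj_name_to_vision_prompt = {
    "brush": "yellow brush",
    "hex_screwdriver": "hex screwdriver",
}

def vision_prompt_for(obj_name: str) -> str:
    """Map an environment object name to its DETIC/SAM text prompt."""
    try:
        return obj_name_to_vision_prompt[obj_name]
    except KeyError as err:
        raise ValueError(f"No vision prompt registered for {obj_name!r}") from err

assert vision_prompt_for("brush") == "yellow brush"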
