From 7560f4dc4615b154133900d191032a43b654fd3c Mon Sep 17 00:00:00 2001
From: Hilbert-Johnson <1730625285@qq.com>
Date: Sun, 31 Dec 2023 19:13:17 +0800
Subject: [PATCH] update SoM_agent

---
 mm_agents/SoM_agent.py | 105 +-
 mm_agents/gemini_test.py | 19 +
 mm_agents/task_adapter/sam/__init__.py | 0
 mm_agents/task_adapter/sam/tasks/__Init__.py | 2 +
 .../sam/tasks/inference_sam_m2m_auto.py | 103 ++
 .../tasks/inference_sam_m2m_interactive.py | 221 +++
 mm_agents/task_adapter/seem/__init__.py | 0
 mm_agents/task_adapter/seem/tasks/__init__.py | 3 +
 .../seem/tasks/automatic_mask_generator.py | 382 +++++
 .../seem/tasks/inference_seem_interactive.py | 169 ++
 .../seem/tasks/inference_seem_pano.py | 164 ++
 .../seem/tasks/interactive_seem_m2m_auto.py | 93 ++
 .../semantic_sam/tasks/__init__.py | 6 +
 .../tasks/automatic_mask_generator.py | 393 +++++
 .../tasks/inference_semsam_m2m_auto.py | 108 ++
 .../tasks/interactive_idino_1o1_box.py | 144 ++
 .../tasks/interactive_idino_m2m.py | 322 ++++
 .../tasks/interactive_predictor.py | 139 ++
 mm_agents/task_adapter/utils/visualizer.py | 1405 +++++++++++++++++
 19 files changed, 3729 insertions(+), 49 deletions(-)
 create mode 100644 mm_agents/gemini_test.py
 create mode 100644 mm_agents/task_adapter/sam/__init__.py
 create mode 100644 mm_agents/task_adapter/sam/tasks/__Init__.py
 create mode 100644 mm_agents/task_adapter/sam/tasks/inference_sam_m2m_auto.py
 create mode 100644 mm_agents/task_adapter/sam/tasks/inference_sam_m2m_interactive.py
 create mode 100755 mm_agents/task_adapter/seem/__init__.py
 create mode 100644 mm_agents/task_adapter/seem/tasks/__init__.py
 create mode 100644 mm_agents/task_adapter/seem/tasks/automatic_mask_generator.py
 create mode 100644 mm_agents/task_adapter/seem/tasks/inference_seem_interactive.py
 create mode 100644 mm_agents/task_adapter/seem/tasks/inference_seem_pano.py
 create mode 100644 mm_agents/task_adapter/seem/tasks/interactive_seem_m2m_auto.py
 create mode 100644 mm_agents/task_adapter/semantic_sam/tasks/__init__.py
 create mode 100644 mm_agents/task_adapter/semantic_sam/tasks/automatic_mask_generator.py
 create mode 100644 mm_agents/task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py
 create mode 100644 mm_agents/task_adapter/semantic_sam/tasks/interactive_idino_1o1_box.py
 create mode 100644 mm_agents/task_adapter/semantic_sam/tasks/interactive_idino_m2m.py
 create mode 100644 mm_agents/task_adapter/semantic_sam/tasks/interactive_predictor.py
 create mode 100644 mm_agents/task_adapter/utils/visualizer.py

diff --git a/mm_agents/SoM_agent.py b/mm_agents/SoM_agent.py
index d4a265c..e115203 100644
--- a/mm_agents/SoM_agent.py
+++ b/mm_agents/SoM_agent.py
@@ -3,10 +3,10 @@
 import os
 import re
 import base64
+import PIL.Image
 from desktop_env.envs.desktop_env import Action, MouseClick
 import json
 import requests
-from mm_agents.gpt_4v_prompt import SYS_PROMPT
 
 import torch
 import argparse
@@ -33,9 +33,37 @@ from task_adapter.sam.tasks.inference_sam_m2m_interactive import inference_sam_m
 from scipy.ndimage import label
 import numpy as np
 
+SYS_PROMPT = '''
+You will act as an agent which follows my instructions and performs desktop computer tasks as instructed. You must have good knowledge of computers and a good internet connection.
+For each step, you will get an observation of an image, which is the screenshot of the computer screen. And you will predict the action to take on the computer based on the image.
+ +Firstly you need to predict the class of your action, select from one below: +- **CLICK**: click on the screen with the specified integer label +- **TYPE**: type a string on the keyboard + +- For CLICK, you need to predict the correct integer label shown on the screenshot +for example, format as: +``` +{ + "action_type": "MOUSE_MOVE", + "label": 7 +} +``` +- For TYPE, you need to specify the text you want to type +for example, format as: +``` +{ + "action_type": "TYPE", + "text": "hello world" +} +``` + +For every step, you should only return the action_type and the parameters of your action as a dict, without any other things. You MUST wrap the dict with backticks (\`). +You can predict multiple actions at one step, but you should only return one action for each step. +You MUST choose and ONLY CHOOSE from the action space above, otherwise your action will be considered as invalid and you will get a penalty. ''' -build args -''' + +# build args semsam_cfg = "configs/semantic_sam_only_sa-1b_swinL.yaml" seem_cfg = "configs/seem_focall_unicl_lang_v1.yaml" @@ -47,9 +75,7 @@ opt_semsam = load_opt_from_config_file(semsam_cfg) opt_seem = load_opt_from_config_file(seem_cfg) opt_seem = init_distributed_seem(opt_seem) -''' -build model -''' +# build model model_semsam = BaseModel(opt_semsam, build_model(opt_semsam)).from_pretrained(semsam_ckpt).eval().cuda() model_sam = sam_model_registry["vit_h"](checkpoint=sam_ckpt).eval().cuda() model_seem = BaseModel_Seem(opt_seem, build_model_seem(opt_seem)).from_pretrained(seem_ckpt).eval().cuda() @@ -65,65 +91,46 @@ def inference(image, slider, mode, alpha, label_mode, anno_mode, *args, **kwargs elif slider > 2.5: model_name = 'sam' else: - if mode == 'Automatic': - model_name = 'semantic-sam' - if slider < 1.5 + 0.14: - level = [1] - elif slider < 1.5 + 0.28: - level = [2] - elif slider < 1.5 + 0.42: - level = [3] - elif slider < 1.5 + 0.56: - level = [4] - elif slider < 1.5 + 0.70: - level = [5] - elif slider < 1.5 + 0.84: - level = [6] - else: - level = [6, 1, 2, 3, 4, 5] + model_name = 'semantic-sam' + if slider < 1.5 + 0.14: + level = [1] + elif slider < 1.5 + 0.28: + level = [2] + elif slider < 1.5 + 0.42: + level = [3] + elif slider < 1.5 + 0.56: + level = [4] + elif slider < 1.5 + 0.70: + level = [5] + elif slider < 1.5 + 0.84: + level = [6] else: - model_name = 'sam' + level = [6, 1, 2, 3, 4, 5] - if label_mode == 'Alphabet': - label_mode = 'a' - else: - label_mode = '1' - - text_size, hole_scale, island_scale = 640, 100, 100 + label_mode = 'a' if label_mode == 'Alphabet' else '1' + text_size, hole_scale, island_scale = 1280, 100, 100 text, text_part, text_thresh = '', '', '0.0' with torch.autocast(device_type='cuda', dtype=torch.float16): semantic = False - if mode == "Interactive": - labeled_array, num_features = label(np.asarray(image['mask'].convert('L'))) - spatial_masks = torch.stack([torch.from_numpy(labeled_array == i+1) for i in range(num_features)]) - if model_name == 'semantic-sam': model = model_semsam - output, mask = inference_semsam_m2m_auto(model, image['image'], level, text, text_part, text_thresh, text_size, hole_scale, island_scale, semantic, label_mode=label_mode, alpha=alpha, anno_mode=anno_mode, *args, **kwargs) + output, mask = inference_semsam_m2m_auto(model, image, level, text, text_part, text_thresh, text_size, hole_scale, island_scale, semantic, label_mode=label_mode, alpha=alpha, anno_mode=anno_mode, *args, **kwargs) elif model_name == 'sam': model = model_sam - if mode == "Automatic": - output, mask = 
inference_sam_m2m_auto(model, image['image'], text_size, label_mode, alpha, anno_mode) - elif mode == "Interactive": - output, mask = inference_sam_m2m_interactive(model, image['image'], spatial_masks, text_size, label_mode, alpha, anno_mode) + output, mask = inference_sam_m2m_auto(model, image, text_size, label_mode, alpha, anno_mode) elif model_name == 'seem': model = model_seem - if mode == "Automatic": - output, mask = inference_seem_pano(model, image['image'], text_size, label_mode, alpha, anno_mode) - elif mode == "Interactive": - output, mask = inference_seem_interactive(model, image['image'], spatial_masks, text_size, label_mode, alpha, anno_mode) + output, mask = inference_seem_pano(model, image, text_size, label_mode, alpha, anno_mode) return output # Function to encode the image -def encode_image(image_path): - with open(image_path, "rb") as image_file: - return base64.b64encode(image_file.read()).decode('utf-8') - +def encode_image(image): + return base64.b64encode(image).decode('utf-8') def parse_actions_from_string(input_string): # Search for a JSON string within the input string @@ -156,7 +163,6 @@ def parse_actions_from_string(input_string): except json.JSONDecodeError as e: raise ValueError("Invalid response format: " + input_string) - class GPT4v_Agent: def __init__(self, api_key, instruction, model="gpt-4-vision-preview", max_tokens=300): self.instruction = instruction @@ -181,7 +187,7 @@ class GPT4v_Agent: ] def predict(self, obs): - obs = inference(obs, slider=2.0, mode="Automatic", alpha=0.1, label_mode="Alphabet", anno_mode=["Mask", "Mark"]) + obs = inference(obs, slider=2.0, mode="Automatic", alpha=0.1, label_mode="Number", anno_mode=["Mark", "Box"]) base64_image = encode_image(obs) self.trajectory.append({ "role": "user", @@ -274,4 +280,5 @@ if __name__ == '__main__': api_key = os.environ.get("OPENAI_API_KEY") agent = GPT4v_Agent(api_key=api_key, instruction="Open Google Sheet") - print(agent.predict(obs="stackoverflow.png")) \ No newline at end of file + obs = PIL.Image.open('stackoverflow.png') + print(agent.predict(obs=obs)) \ No newline at end of file diff --git a/mm_agents/gemini_test.py b/mm_agents/gemini_test.py new file mode 100644 index 0000000..28ecc8a --- /dev/null +++ b/mm_agents/gemini_test.py @@ -0,0 +1,19 @@ +import PIL.Image +import google.generativeai as genai + +genai.configure(api_key="AIzaSyANsETKHVo-D8jZu1SnTSaQgLOJEDgnj9Q") + +# for m in genai.list_models(): +# if 'generateContent' in m.supported_generation_methods: +# print(m.name) + +model = genai.GenerativeModel('gemini-pro-vision') + +img = PIL.Image.open('image.jpg') + +messages = [ + {'role':'user', + 'parts': ["Explain this image.", img]} +] + +response = model.generate_content(messages) \ No newline at end of file diff --git a/mm_agents/task_adapter/sam/__init__.py b/mm_agents/task_adapter/sam/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mm_agents/task_adapter/sam/tasks/__Init__.py b/mm_agents/task_adapter/sam/tasks/__Init__.py new file mode 100644 index 0000000..ce45369 --- /dev/null +++ b/mm_agents/task_adapter/sam/tasks/__Init__.py @@ -0,0 +1,2 @@ +from .inference_sam_m2m_auto import * +from .inference_sam_m2m_interactive import * \ No newline at end of file diff --git a/mm_agents/task_adapter/sam/tasks/inference_sam_m2m_auto.py b/mm_agents/task_adapter/sam/tasks/inference_sam_m2m_auto.py new file mode 100644 index 0000000..d51cf75 --- /dev/null +++ b/mm_agents/task_adapter/sam/tasks/inference_sam_m2m_auto.py @@ -0,0 +1,103 @@ +# 
-------------------------------------------------------- +# Semantic-SAM: Segment and Recognize Anything at Any Granularity +# Copyright (c) 2023 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Hao Zhang (hzhangcx@connect.ust.hk) +# -------------------------------------------------------- + +import torch +import numpy as np +from torchvision import transforms +from task_adapter.utils.visualizer import Visualizer +from typing import Tuple +from PIL import Image +from detectron2.data import MetadataCatalog +import matplotlib.pyplot as plt +import cv2 +import io +from segment_anything import SamAutomaticMaskGenerator + +metadata = MetadataCatalog.get('coco_2017_train_panoptic') + + +def inference_sam_m2m_auto(model, image, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image) + image_ori = np.asarray(image_ori) + + mask_generator = SamAutomaticMaskGenerator(model) + outputs = mask_generator.generate(image_ori) + + from task_adapter.utils.visualizer import Visualizer + visual = Visualizer(image_ori, metadata=metadata) + sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True) + label = 1 + # for ann in sorted_anns: + # mask = ann['segmentation'] + # color_mask = np.random.random((1, 3)).tolist()[0] + # # color_mask = [int(c*255) for c in color_mask] + # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + # label += 1 + # im = demo.get_image() + + mask_map = np.zeros(image_ori.shape, dtype=np.uint8) + for i, ann in enumerate(sorted_anns): + mask = ann['segmentation'] + color_mask = np.random.random((1, 3)).tolist()[0] + # color_mask = [int(c*255) for c in color_mask] + demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + # assign the mask to the mask_map + mask_map[mask == 1] = label + label += 1 + im = demo.get_image() + # fig=plt.figure(figsize=(10, 10)) + # plt.imshow(image_ori) + # show_anns(outputs) + # fig.canvas.draw() + # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb()) + return im, sorted_anns + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. 
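As a quick illustration of the hole/island cleanup described here, a minimal sketch (assuming this module's remove_small_regions is in scope and OpenCV is installed; the mask below is made up for the example):

```python
import numpy as np

# a 64x64 boolean mask: a 10x10 square with a single-pixel hole punched in it
m = np.zeros((64, 64), dtype=bool)
m[10:20, 10:20] = True
m[15, 15] = False

filled, changed = remove_small_regions(m, area_thresh=4, mode="holes")
assert changed and filled.sum() == 100   # the 1-px hole is below area_thresh, so it is filled

cleaned, changed = remove_small_regions(filled, area_thresh=4, mode="islands")
assert not changed                       # the 10x10 island is well above area_thresh, so it is kept
```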
+ """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + +def show_anns(anns): + if len(anns) == 0: + return + sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) + ax = plt.gca() + ax.set_autoscale_on(False) + polygons = [] + color = [] + for ann in sorted_anns: + m = ann['segmentation'] + img = np.ones((m.shape[0], m.shape[1], 3)) + color_mask = np.random.random((1, 3)).tolist()[0] + for i in range(3): + img[:,:,i] = color_mask[i] + ax.imshow(np.dstack((img, m*0.35))) \ No newline at end of file diff --git a/mm_agents/task_adapter/sam/tasks/inference_sam_m2m_interactive.py b/mm_agents/task_adapter/sam/tasks/inference_sam_m2m_interactive.py new file mode 100644 index 0000000..5752138 --- /dev/null +++ b/mm_agents/task_adapter/sam/tasks/inference_sam_m2m_interactive.py @@ -0,0 +1,221 @@ +# -------------------------------------------------------- +# Semantic-SAM: Segment and Recognize Anything at Any Granularity +# Copyright (c) 2023 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Hao Zhang (hzhangcx@connect.ust.hk) +# -------------------------------------------------------- + +import torch +import torch.nn.functional as F +import numpy as np +from torchvision import transforms +from task_adapter.utils.visualizer import Visualizer +from typing import Tuple +from PIL import Image +from detectron2.data import MetadataCatalog +from kornia.contrib import distance_transform +import matplotlib.pyplot as plt +import cv2 +import io +metadata = MetadataCatalog.get('coco_2017_train_panoptic') + +from segment_anything import SamAutomaticMaskGenerator +from segment_anything.utils.amg import ( + MaskData, + area_from_rle, + batch_iterator, + batched_mask_to_box, + box_xyxy_to_xywh, + build_all_layer_point_grids, + calculate_stability_score, + coco_encode_rle, + generate_crop_boxes, + is_box_near_crop_edge, + mask_to_rle_pytorch, + remove_small_regions, + rle_to_mask, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, +) + +def sam_interactive_mask(mask_generator, points, in_points, in_labels, mask_input): + masks, iou_preds, _ = mask_generator.predictor.predict_torch( + in_points, + in_labels, + mask_input=mask_input, + multimask_output=True, + return_logits=True, + ) + nm,_,h,w = masks.shape + + # Serialize predictions and store in MaskData + data = MaskData( + masks=masks.flatten(0, 1), + iou_preds=iou_preds.flatten(0, 1), + points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), + ) + del masks + + # Calculate stability score + data["stability_score"] = calculate_stability_score( + data["masks"], mask_generator.predictor.model.mask_threshold, mask_generator.stability_score_offset + ) + + masks = data["masks"].reshape(nm, -1, h, w) + scores = (data['iou_preds'] + data['stability_score']).reshape(nm, -1) + + index = torch.stack([torch.arange(nm).cuda(), 
scores.argmax(dim=1)]).tolist() + return masks[index] + +def inference_sam_m2m_interactive(model, image, spatial_masks, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image) + + image_ori = np.asarray(image_ori) + images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() + + orig_size = images.shape[-2:] + orig_h, orig_w = orig_size + crop_box = [0,0,orig_w,orig_h] + + spatial_masks = spatial_masks[:, None].float().cuda() + spatial_masks = F.interpolate(spatial_masks, size=(orig_h, orig_w), mode='bicubic', align_corners=False) > 0 + + # generate single center point + # n,_,h,w = spatial_masks.shape + # mask_dt = (distance_transform((~F.pad(spatial_masks, pad=(1, 1, 1, 1), mode='constant', value=0)).float())[:,:,1:-1,1:-1]).reshape(n,-1) + # max_xy_idx = torch.stack([torch.arange(n), mask_dt.max(dim=-1)[1].cpu()]).tolist() + # next_mask = torch.zeros(spatial_masks.shape, device=torch.cuda.current_device()).bool() + # next_mask = next_mask.view(n,-1) + # next_mask[max_xy_idx] = True + # next_mask = next_mask.reshape((n,1,h,w)) + # points = next_mask.nonzero()[:,2:].flip(dims=[1]).cpu().numpy() + + # stack sampled points + acc_points = [] + for i in range(len(spatial_masks)): + points = spatial_masks[i:i+1].nonzero()[:,2:].flip(dims=[1]).cpu().numpy() + rand_ids = np.random.choice(points.shape[0], size=40, replace=True) + points = points[rand_ids] + acc_points.append(points) + _np = len(acc_points) + points = np.concatenate(acc_points) + + mask_generator = SamAutomaticMaskGenerator(model) + mask_generator.predictor.set_image(image_ori) + im_size = image_ori.shape[:-1] + + transformed_points = mask_generator.predictor.transform.apply_coords(points, im_size) + in_points = torch.as_tensor(transformed_points, device=mask_generator.predictor.device).reshape(_np,-1,2).transpose(0,1) + in_labels = torch.ones((in_points.shape[0], _np), dtype=torch.int, device=mask_generator.predictor.device) + + masks = sam_interactive_mask(mask_generator, points, in_points.transpose(0,1), in_labels.transpose(0,1), None) + + masks = masks > 0.0 + iou_preds = torch.ones(masks.shape[0], dtype=torch.float32) + points = torch.zeros((masks.shape[0], 2), dtype=torch.float32) + + mask_data = MaskData( + masks=masks, + iou_preds=iou_preds, + points=points, + ) + + mask_data["stability_score"] = torch.ones(masks.shape[0], dtype=torch.float32) + del masks + + mask_data["boxes"] = batched_mask_to_box(mask_data["masks"]) + mask_data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(mask_data["boxes"]))]) + + # Compress to RLE + mask_data["masks"] = uncrop_masks(mask_data["masks"], crop_box, orig_h, orig_w) + mask_data["rles"] = mask_to_rle_pytorch(mask_data["masks"]) + del mask_data["masks"] + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + + # Write mask records + outputs = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + outputs.append(ann) + + from task_adapter.utils.visualizer import Visualizer + visual 
= Visualizer(image_ori, metadata=metadata) + sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True) + label = 1 + # for ann in sorted_anns: + # mask = ann['segmentation'] + # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + # label += 1 + # im = demo.get_image() + + mask_map = np.zeros(image_ori.shape, dtype=np.uint8) + for i, ann in enumerate(sorted_anns): + mask = ann['segmentation'] + color_mask = np.random.random((1, 3)).tolist()[0] + # color_mask = [int(c*255) for c in color_mask] + demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + # assign the mask to the mask_map + mask_map[mask == 1] = label + label += 1 + im = demo.get_image() + # fig=plt.figure(figsize=(10, 10)) + # plt.imshow(image_ori) + # show_anns(outputs) + # fig.canvas.draw() + # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb()) + return im, sorted_anns + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + +def show_anns(anns): + if len(anns) == 0: + return + sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) + ax = plt.gca() + ax.set_autoscale_on(False) + polygons = [] + color = [] + for ann in sorted_anns: + m = ann['segmentation'] + img = np.ones((m.shape[0], m.shape[1], 3)) + color_mask = np.random.random((1, 3)).tolist()[0] + for i in range(3): + img[:,:,i] = color_mask[i] + ax.imshow(np.dstack((img, m*0.35))) \ No newline at end of file diff --git a/mm_agents/task_adapter/seem/__init__.py b/mm_agents/task_adapter/seem/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/mm_agents/task_adapter/seem/tasks/__init__.py b/mm_agents/task_adapter/seem/tasks/__init__.py new file mode 100644 index 0000000..bff9514 --- /dev/null +++ b/mm_agents/task_adapter/seem/tasks/__init__.py @@ -0,0 +1,3 @@ +from .interactive_seem_m2m_auto import * +from .inference_seem_pano import * +from .inference_seem_interactive import * \ No newline at end of file diff --git a/mm_agents/task_adapter/seem/tasks/automatic_mask_generator.py b/mm_agents/task_adapter/seem/tasks/automatic_mask_generator.py new file mode 100644 index 0000000..66e2317 --- /dev/null +++ b/mm_agents/task_adapter/seem/tasks/automatic_mask_generator.py @@ -0,0 +1,382 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
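The interactive entry points added above (inference_sam_m2m_interactive here, and inference_seem_interactive exported by the SEEM tasks package) expect spatial_masks as a stacked boolean tensor, one connected region per prompt. A minimal sketch of building that tensor from a user-drawn mask, mirroring the "Interactive" branch removed from SoM_agent.py earlier in this patch ("user_mask.png" is a hypothetical scribble image):

```python
import numpy as np
import torch
from PIL import Image
from scipy.ndimage import label

# hypothetical user-drawn scribble over the screenshot; any nonzero pixel counts as a prompt
mask_img = Image.open("user_mask.png").convert("L")
labeled_array, num_features = label(np.asarray(mask_img))   # assumes at least one scribble

# one boolean HxW mask per connected scribble, stacked along dim 0
spatial_masks = torch.stack(
    [torch.from_numpy(labeled_array == i + 1) for i in range(num_features)]
)
```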
+ +import numpy as np +import torch +import torch.nn as nn +from torchvision.ops.boxes import batched_nms, box_area # type: ignore + +from typing import Any, Dict, List, Optional, Tuple + +from segment_anything.modeling import Sam +from segment_anything.utils.amg import ( + MaskData, + area_from_rle, + batch_iterator, + batched_mask_to_box, + box_xyxy_to_xywh, + build_all_layer_point_grids, + calculate_stability_score, + coco_encode_rle, + generate_crop_boxes, + is_box_near_crop_edge, + mask_to_rle_pytorch, + remove_small_regions, + rle_to_mask, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, +) + + +class SeemAutomaticMaskGenerator: + def __init__( + self, + model: Sam, + points_per_side: Optional[int] = 32, + points_per_batch: int = 64, + pred_iou_thresh: float = 0.9, + stability_score_thresh: float = 0.5, + stability_score_offset: float = 1.0, + box_nms_thresh: float = 0.7, + crop_n_layers: int = 0, + crop_nms_thresh: float = 0.7, + crop_overlap_ratio: float = 512 / 1500, + crop_n_points_downscale_factor: int = 1, + point_grids: Optional[List[np.ndarray]] = None, + min_mask_region_area: int = 0, + output_mode: str = "binary_mask", + ) -> None: + """ + Using a SAM model, generates masks for the entire image. + Generates a grid of point prompts over the image, then filters + low quality and duplicate masks. The default settings are chosen + for SAM with a ViT-H backbone. + + Arguments: + model (Sam): The SAM model to use for mask prediction. + points_per_side (int or None): The number of points to be sampled + along one side of the image. The total number of points is + points_per_side**2. If None, 'point_grids' must provide explicit + point sampling. + points_per_batch (int): Sets the number of points run simultaneously + by the model. Higher numbers may be faster but use more GPU memory. + pred_iou_thresh (float): A filtering threshold in [0,1], using the + model's predicted mask quality. + stability_score_thresh (float): A filtering threshold in [0,1], using + the stability of the mask under changes to the cutoff used to binarize + the model's mask predictions. + stability_score_offset (float): The amount to shift the cutoff when + calculated the stability score. + box_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks. + crop_n_layers (int): If >0, mask prediction will be run again on + crops of the image. Sets the number of layers to run, where each + layer has 2**i_layer number of image crops. + crop_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks between different crops. + crop_overlap_ratio (float): Sets the degree to which crops overlap. + In the first crop layer, crops will overlap by this fraction of + the image length. Later layers with more crops scale down this overlap. + crop_n_points_downscale_factor (int): The number of points-per-side + sampled in layer n is scaled down by crop_n_points_downscale_factor**n. + point_grids (list(np.ndarray) or None): A list over explicit grids + of points used for sampling, normalized to [0,1]. The nth grid in the + list is used in the nth crop layer. Exclusive with points_per_side. + min_mask_region_area (int): If >0, postprocessing will be applied + to remove disconnected regions and holes in masks with area smaller + than min_mask_region_area. Requires opencv. + output_mode (str): The form masks are returned in. Can be 'binary_mask', + 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. 
+ For large resolutions, 'binary_mask' may consume large amounts of + memory. + """ + + assert (points_per_side is None) != ( + point_grids is None + ), "Exactly one of points_per_side or point_grid must be provided." + if points_per_side is not None: + self.point_grids = build_all_layer_point_grids( + points_per_side, + crop_n_layers, + crop_n_points_downscale_factor, + ) + elif point_grids is not None: + self.point_grids = point_grids + else: + raise ValueError("Can't have both points_per_side and point_grid be None.") + + assert output_mode in [ + "binary_mask", + "uncompressed_rle", + "coco_rle", + ], f"Unknown output_mode {output_mode}." + if output_mode == "coco_rle": + from pycocotools import mask as mask_utils # type: ignore # noqa: F401 + + if min_mask_region_area > 0: + import cv2 # type: ignore # noqa: F401 + + self.predictor = model + self.points_per_batch = points_per_batch + self.pred_iou_thresh = pred_iou_thresh + self.stability_score_thresh = stability_score_thresh + self.stability_score_offset = stability_score_offset + self.box_nms_thresh = box_nms_thresh + self.crop_n_layers = crop_n_layers + self.crop_nms_thresh = crop_nms_thresh + self.crop_overlap_ratio = crop_overlap_ratio + self.crop_n_points_downscale_factor = crop_n_points_downscale_factor + self.min_mask_region_area = min_mask_region_area + self.output_mode = output_mode + + # dilate conv + self.dilation = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7, stride=1, padding=3, bias=False) + self.dilation.weight.data.fill_(1.0) + self.dilation.cuda() + + @torch.no_grad() + def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: + """ + Generates masks for the given image. + + Arguments: + image (np.ndarray): The image to generate masks for, in HWC uint8 format. + + Returns: + list(dict(str, any)): A list over records for masks. Each record is + a dict containing the following keys: + segmentation (dict(str, any) or np.ndarray): The mask. If + output_mode='binary_mask', is an array of shape HW. Otherwise, + is a dictionary containing the RLE. + bbox (list(float)): The box around the mask, in XYWH format. + area (int): The area in pixels of the mask. + predicted_iou (float): The model's own prediction of the mask's + quality. This is filtered by the pred_iou_thresh parameter. + point_coords (list(list(float))): The point coordinates input + to the model to generate this mask. + stability_score (float): A measure of the mask's quality. This + is filtered on using the stability_score_thresh parameter. + crop_box (list(float)): The crop of the image used to generate + the mask, given in XYWH format. 
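A minimal usage sketch, mirroring interactive_seem_m2m_auto later in this patch. Note that the caller there passes a CHW CUDA tensor rather than the HWC uint8 array described above; model_seem is assumed to be the BaseModel_Seem instance built in SoM_agent.py:

```python
import numpy as np
import torch
from PIL import Image
from task_adapter.seem.tasks.automatic_mask_generator import SeemAutomaticMaskGenerator

image_ori = np.asarray(Image.open("screenshot.png").convert("RGB"))   # hypothetical screenshot
images = torch.from_numpy(image_ori.copy()).permute(2, 0, 1).cuda()   # CHW tensor for evaluate_demo

generator = SeemAutomaticMaskGenerator(model_seem)                    # model_seem assumed from SoM_agent.py
anns = generator.generate(images)
anns = sorted(anns, key=lambda a: a["area"], reverse=True)            # largest masks first
```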
+ """ + + # Generate masks + mask_data = self._generate_masks(image) + + # Filter small disconnected regions and holes in masks + if self.min_mask_region_area > 0: + mask_data = self.postprocess_small_regions( + mask_data, + self.min_mask_region_area, + max(self.box_nms_thresh, self.crop_nms_thresh), + ) + # Encode masks + if self.output_mode == "coco_rle": + mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] + elif self.output_mode == "binary_mask": + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + else: + mask_data["segmentations"] = mask_data["rles"] + + # Write mask records + curr_anns = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + curr_anns.append(ann) + + return curr_anns + + def _generate_masks(self, image: np.ndarray) -> MaskData: + orig_size = image.shape[-2:] + crop_boxes, layer_idxs = generate_crop_boxes( + orig_size, self.crop_n_layers, self.crop_overlap_ratio + ) + + # Iterate over image crops + data = MaskData() + for crop_box, layer_idx in zip(crop_boxes, layer_idxs): + crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) + data.cat(crop_data) + + # Remove duplicate masks between crops + if len(crop_boxes) > 1: + # Prefer masks from smaller crops + scores = 1 / box_area(data["crop_boxes"]) + scores = scores.to(data["boxes"].device) + keep_by_nms = batched_nms( + data["boxes"].float(), + scores, + torch.zeros_like(data["boxes"][:, 0]), # categories + iou_threshold=self.crop_nms_thresh, + ) + data.filter(keep_by_nms) + + data.to_numpy() + return data + + def _process_crop( + self, + image: np.ndarray, + crop_box: List[int], + crop_layer_idx: int, + orig_size: Tuple[int, ...], + ) -> MaskData: + # Crop the image and calculate embeddings + x0, y0, x1, y1 = crop_box + cropped_im = image#[y0:y1, x0:x1, :] + cropped_im_size = cropped_im.shape[-2:] + # self.predictor.set_image(cropped_im) + + # Get points for this crop + points_scale = np.array(cropped_im_size)[None, ::-1] + points_for_image = self.point_grids[crop_layer_idx] #* points_scale + + # Generate masks for this crop in batches + data = MaskData() + self.enc_features=None + + for (points,) in batch_iterator(self.points_per_batch, points_for_image): + batch_data = self._process_batch(cropped_im, points, cropped_im_size, crop_box, orig_size) + data.cat(batch_data) + del batch_data + + # Remove duplicates within this crop. 
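+        # torchvision's batched_nms treats every box as the same category here (the zeros
+        # tensor below), so overlapping proposals from nearby point prompts collapse to the
+        # highest-scoring one whenever their box IoU exceeds box_nms_thresh.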
+ keep_by_nms = batched_nms( + data["boxes"].float(), + data["iou_preds"], + torch.zeros(len(data["boxes"])), # categories + iou_threshold=self.box_nms_thresh, + ) + + data.filter(keep_by_nms) + + # Return to the original image frame + data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) + data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) + + return data + + def _process_batch( + self, + images, + points: np.ndarray, + im_size: Tuple[int, ...], + crop_box: List[int], + orig_size: Tuple[int, ...], + ) -> MaskData: + orig_h, orig_w = orig_size + + data = {"image": images, "height": orig_h, "width": orig_w} + points = torch.tensor(points,dtype=torch.float).to(images.device) + + # prepare interactive mask for seem + abs_points = (points * torch.tensor(orig_size)[None,:].to(points.device)).long() + abs_masks = torch.zeros((len(points), orig_h, orig_w), dtype=torch.bool).to(device=points.device) + abs_masks[torch.arange(0, abs_points.size(0))[:,None], abs_points[:,0:1], abs_points[:,1:2]] = True + abs_masks = self.dilation(abs_masks[:,None].float())[:,0] > 0 + data['spatial_query'] = {'rand_shape': abs_masks[:,None]} + + batch_inputs = [data] + if self.enc_features is None: + masks, iou_preds, mask_features, transformer_encoder_features, multi_scale_features = self.predictor.model.evaluate_demo(batch_inputs, None, None, return_features=True) + self.enc_features = (mask_features, transformer_encoder_features, multi_scale_features) + else: + masks, iou_preds = self.predictor.model.evaluate_demo(batch_inputs, self.enc_features[0], self.enc_features[1], self.enc_features[2]) + + data = MaskData( + masks=masks, + iou_preds=iou_preds, + points=points, + ) + del masks + # Filter by predicted IoU + if self.pred_iou_thresh > 0.0: + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + # Calculate stability score + data["stability_score"] = calculate_stability_score( + data["masks"], 0.0, self.stability_score_offset + ) + if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + + # Threshold masks and calculate boxes + data["masks"] = data["masks"] > 0.0 + data["boxes"] = batched_mask_to_box(data["masks"]) + + # Filter boxes that touch crop boundaries + keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) + if not torch.all(keep_mask): + data.filter(keep_mask) + + # Compress to RLE + data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) + data["rles"] = mask_to_rle_pytorch(data["masks"]) + del data["masks"] + + return data + + @staticmethod + def postprocess_small_regions( + mask_data: MaskData, min_area: int, nms_thresh: float + ) -> MaskData: + """ + Removes small disconnected regions and holes in masks, then reruns + box NMS to remove any new duplicates. + + Edits mask_data in place. + + Requires open-cv as a dependency. 
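Since this method round-trips masks through the RLE helpers from segment_anything.utils.amg, here is a small self-contained sketch of those helpers (the toy mask is made up for the example):

```python
import torch
from segment_anything.utils.amg import area_from_rle, mask_to_rle_pytorch, rle_to_mask

masks = torch.zeros((1, 32, 32), dtype=torch.bool)
masks[0, 8:16, 8:16] = True

rles = mask_to_rle_pytorch(masks)      # one uncompressed RLE dict per mask in the batch
print(area_from_rle(rles[0]))          # 64
restored = rle_to_mask(rles[0])        # back to an (H, W) boolean numpy array
```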
+ """ + if len(mask_data["rles"]) == 0: + return mask_data + + # Filter small disconnected regions and holes + new_masks = [] + scores = [] + for rle in mask_data["rles"]: + mask = rle_to_mask(rle) + + mask, changed = remove_small_regions(mask, min_area, mode="holes") + unchanged = not changed + mask, changed = remove_small_regions(mask, min_area, mode="islands") + unchanged = unchanged and not changed + + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) + # Give score=0 to changed masks and score=1 to unchanged masks + # so NMS will prefer ones that didn't need postprocessing + scores.append(float(unchanged)) + + # Recalculate boxes and remove any new duplicates + masks = torch.cat(new_masks, dim=0) + boxes = batched_mask_to_box(masks) + keep_by_nms = batched_nms( + boxes.float(), + torch.as_tensor(scores), + torch.zeros_like(boxes[:, 0]), # categories + iou_threshold=nms_thresh, + ) + + # Only recalculate RLEs for masks that have changed + for i_mask in keep_by_nms: + if scores[i_mask] == 0.0: + mask_torch = masks[i_mask].unsqueeze(0) + mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0] + mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly + mask_data.filter(keep_by_nms) + + return mask_data \ No newline at end of file diff --git a/mm_agents/task_adapter/seem/tasks/inference_seem_interactive.py b/mm_agents/task_adapter/seem/tasks/inference_seem_interactive.py new file mode 100644 index 0000000..a4b3ce9 --- /dev/null +++ b/mm_agents/task_adapter/seem/tasks/inference_seem_interactive.py @@ -0,0 +1,169 @@ +# -------------------------------------------------------- +# Semantic-SAM: Segment and Recognize Anything at Any Granularity +# Copyright (c) 2023 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Hao Zhang (hzhangcx@connect.ust.hk) +# -------------------------------------------------------- + +import torch +import torch.nn.functional as F +import numpy as np +from torchvision import transforms +from task_adapter.utils.visualizer import Visualizer +from typing import Tuple +from PIL import Image +from detectron2.data import MetadataCatalog +import matplotlib.pyplot as plt +import cv2 +import io +from .automatic_mask_generator import SeemAutomaticMaskGenerator +metadata = MetadataCatalog.get('coco_2017_train_panoptic') + +from segment_anything.utils.amg import ( + MaskData, + area_from_rle, + batch_iterator, + batched_mask_to_box, + box_xyxy_to_xywh, + build_all_layer_point_grids, + calculate_stability_score, + coco_encode_rle, + generate_crop_boxes, + is_box_near_crop_edge, + mask_to_rle_pytorch, + remove_small_regions, + rle_to_mask, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, +) + + +def inference_seem_interactive(model, image, spatial_masks, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image) + + image_ori = np.asarray(image_ori) + images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() + + orig_size = images.shape[-2:] + orig_h, orig_w = orig_size + crop_box = [0,0,orig_w,orig_h] + + data = {"image": images, "height": orig_h, "width": orig_w} + + spatial_masks = spatial_masks[:, None].float().cuda() + spatial_masks = F.interpolate(spatial_masks, size=(orig_h, orig_w), mode='bicubic', align_corners=False) > 0 + data['spatial_query'] = {'rand_shape': spatial_masks} + + model.model.metadata = metadata + masks, _ = model.model.evaluate_demo([data]) 
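+    # evaluate_demo returns one raw mask score map per spatial prompt; thresholding at 0.0
+    # binarizes them, and the constant iou_preds / points filled in below exist only so the
+    # records fit the same MaskData schema used by the automatic generators.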
+ masks = masks > 0.0 + iou_preds = torch.ones(masks.shape[0], dtype=torch.float32) + points = torch.zeros((masks.shape[0], 2), dtype=torch.float32) + + mask_data = MaskData( + masks=masks, + iou_preds=iou_preds, + points=points, + ) + + mask_data["stability_score"] = torch.ones(masks.shape[0], dtype=torch.float32) + del masks + + mask_data["boxes"] = batched_mask_to_box(mask_data["masks"]) + mask_data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(mask_data["boxes"]))]) + + # Compress to RLE + mask_data["masks"] = uncrop_masks(mask_data["masks"], crop_box, orig_h, orig_w) + mask_data["rles"] = mask_to_rle_pytorch(mask_data["masks"]) + del mask_data["masks"] + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + + # Write mask records + outputs = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + outputs.append(ann) + + from task_adapter.utils.visualizer import Visualizer + visual = Visualizer(image_ori, metadata=metadata) + sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True) + label = 1 + # for ann in sorted_anns: + # mask = ann['segmentation'] + # color_mask = np.random.random((1, 3)).tolist()[0] + # # color_mask = [int(c*255) for c in color_mask] + # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + # label += 1 + # im = demo.get_image() + + mask_map = np.zeros(image_ori.shape, dtype=np.uint8) + for i, ann in enumerate(sorted_anns): + mask = ann['segmentation'] + color_mask = np.random.random((1, 3)).tolist()[0] + # color_mask = [int(c*255) for c in color_mask] + demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + # assign the mask to the mask_map + mask_map[mask == 1] = label + label += 1 + im = demo.get_image() + # fig=plt.figure(figsize=(10, 10)) + # plt.imshow(image_ori) + # show_anns(outputs) + # fig.canvas.draw() + # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb()) + return im, sorted_anns + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. 
+ """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + +def show_anns(anns): + if len(anns) == 0: + return + sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) + ax = plt.gca() + ax.set_autoscale_on(False) + polygons = [] + color = [] + for ann in sorted_anns: + m = ann['segmentation'] + img = np.ones((m.shape[0], m.shape[1], 3)) + color_mask = np.random.random((1, 3)).tolist()[0] + for i in range(3): + img[:,:,i] = color_mask[i] + ax.imshow(np.dstack((img, m*0.35))) \ No newline at end of file diff --git a/mm_agents/task_adapter/seem/tasks/inference_seem_pano.py b/mm_agents/task_adapter/seem/tasks/inference_seem_pano.py new file mode 100644 index 0000000..d75af48 --- /dev/null +++ b/mm_agents/task_adapter/seem/tasks/inference_seem_pano.py @@ -0,0 +1,164 @@ +# -------------------------------------------------------- +# Semantic-SAM: Segment and Recognize Anything at Any Granularity +# Copyright (c) 2023 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Hao Zhang (hzhangcx@connect.ust.hk) +# -------------------------------------------------------- + +import torch +import numpy as np +from torchvision import transforms +from task_adapter.utils.visualizer import Visualizer +from typing import Tuple +from PIL import Image +from detectron2.data import MetadataCatalog +import matplotlib.pyplot as plt +import cv2 +import io +from .automatic_mask_generator import SeemAutomaticMaskGenerator +metadata = MetadataCatalog.get('coco_2017_train_panoptic') + +from segment_anything.utils.amg import ( + MaskData, + area_from_rle, + batch_iterator, + batched_mask_to_box, + box_xyxy_to_xywh, + build_all_layer_point_grids, + calculate_stability_score, + coco_encode_rle, + generate_crop_boxes, + is_box_near_crop_edge, + mask_to_rle_pytorch, + remove_small_regions, + rle_to_mask, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, +) + + +def inference_seem_pano(model, image, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image) + + image_ori = np.asarray(image_ori) + images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() + + orig_size = images.shape[-2:] + orig_h, orig_w = orig_size + crop_box = [0,0,orig_w,orig_h] + + data = {"image": images, "height": orig_h, "width": orig_w} + batch_inputs = [data] + + model.model.metadata = metadata + outputs = model.model.evaluate(batch_inputs) + + pano_mask = outputs[0]['panoptic_seg'][0] + pano_info = outputs[0]['panoptic_seg'][1] + + masks = [] + for seg_info in pano_info: + masks += [pano_mask == seg_info['id']] + masks = torch.stack(masks, dim=0) + iou_preds = torch.ones(masks.shape[0], dtype=torch.float32) + points = torch.zeros((masks.shape[0], 2), dtype=torch.float32) + + 
mask_data = MaskData( + masks=masks, + iou_preds=iou_preds, + points=points, + ) + mask_data["stability_score"] = torch.ones(masks.shape[0], dtype=torch.float32) + del masks + + mask_data["boxes"] = batched_mask_to_box(mask_data["masks"]) + mask_data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(mask_data["boxes"]))]) + + # Compress to RLE + mask_data["masks"] = uncrop_masks(mask_data["masks"], crop_box, orig_h, orig_w) + mask_data["rles"] = mask_to_rle_pytorch(mask_data["masks"]) + del mask_data["masks"] + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + + # Write mask records + outputs = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + outputs.append(ann) + + from task_adapter.utils.visualizer import Visualizer + visual = Visualizer(image_ori, metadata=metadata) + # create a full zero image as the image_orig + sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True) + label = 1 + mask_map = np.zeros(image_ori.shape, dtype=np.uint8) + for i, ann in enumerate(sorted_anns): + mask = ann['segmentation'] + color_mask = np.random.random((1, 3)).tolist()[0] + # color_mask = [int(c*255) for c in color_mask] + demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + # assign the mask to the mask_map + mask_map[mask == 1] = label + label += 1 + im = demo.get_image() + # fig=plt.figure(figsize=(10, 10)) + # plt.imshow(image_ori) + # show_anns(outputs) + # fig.canvas.draw() + # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb()) + return im, sorted_anns + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. 
+ """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + +def show_anns(anns): + if len(anns) == 0: + return + sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) + ax = plt.gca() + ax.set_autoscale_on(False) + polygons = [] + color = [] + for ann in sorted_anns: + m = ann['segmentation'] + img = np.ones((m.shape[0], m.shape[1], 3)) + color_mask = np.random.random((1, 3)).tolist()[0] + for i in range(3): + img[:,:,i] = color_mask[i] + ax.imshow(np.dstack((img, m*0.35))) \ No newline at end of file diff --git a/mm_agents/task_adapter/seem/tasks/interactive_seem_m2m_auto.py b/mm_agents/task_adapter/seem/tasks/interactive_seem_m2m_auto.py new file mode 100644 index 0000000..f35a6a4 --- /dev/null +++ b/mm_agents/task_adapter/seem/tasks/interactive_seem_m2m_auto.py @@ -0,0 +1,93 @@ +# -------------------------------------------------------- +# Semantic-SAM: Segment and Recognize Anything at Any Granularity +# Copyright (c) 2023 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Hao Zhang (hzhangcx@connect.ust.hk) +# -------------------------------------------------------- + +import torch +import numpy as np +from torchvision import transforms +from task_adapter.utils.visualizer import Visualizer +from typing import Tuple +from PIL import Image +from detectron2.data import MetadataCatalog +import matplotlib.pyplot as plt +import cv2 +import io +from .automatic_mask_generator import SeemAutomaticMaskGenerator +metadata = MetadataCatalog.get('coco_2017_train_panoptic') + +def interactive_seem_m2m_auto(model, image, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image) + + image_ori = np.asarray(image_ori) + images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() + + mask_generator = SeemAutomaticMaskGenerator(model) + outputs = mask_generator.generate(images) + + from task_adapter.utils.visualizer import Visualizer + visual = Visualizer(image_ori, metadata=metadata) + sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True) + label = 1 + for ann in sorted_anns: + mask = ann['segmentation'] + color_mask = np.random.random((1, 3)).tolist()[0] + # color_mask = [int(c*255) for c in color_mask] + demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + label += 1 + im = demo.get_image() + + # fig=plt.figure(figsize=(10, 10)) + # plt.imshow(image_ori) + # show_anns(outputs) + # fig.canvas.draw() + # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb()) + return im + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected 
regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + +def show_anns(anns): + if len(anns) == 0: + return + sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) + ax = plt.gca() + ax.set_autoscale_on(False) + polygons = [] + color = [] + for ann in sorted_anns: + m = ann['segmentation'] + img = np.ones((m.shape[0], m.shape[1], 3)) + color_mask = np.random.random((1, 3)).tolist()[0] + for i in range(3): + img[:,:,i] = color_mask[i] + ax.imshow(np.dstack((img, m*0.35))) \ No newline at end of file diff --git a/mm_agents/task_adapter/semantic_sam/tasks/__init__.py b/mm_agents/task_adapter/semantic_sam/tasks/__init__.py new file mode 100644 index 0000000..08e1951 --- /dev/null +++ b/mm_agents/task_adapter/semantic_sam/tasks/__init__.py @@ -0,0 +1,6 @@ +from .interactive_idino_m2m import interactive_infer_image as interactive_infer_image_idino_m2m +from .interactive_idino_m2m import interactive_infer_image_semantic, interactive_infer_image_3l +from .inference_semsam_m2m_auto import inference_semsam_m2m_auto +from .interactive_idino_1o1_box import interactive_infer_image_box as interactive_infer_image_idino_m2m_box +from .automatic_mask_generator import prompt_switch +from .interactive_predictor import SemanticSAMPredictor \ No newline at end of file diff --git a/mm_agents/task_adapter/semantic_sam/tasks/automatic_mask_generator.py b/mm_agents/task_adapter/semantic_sam/tasks/automatic_mask_generator.py new file mode 100644 index 0000000..fe28899 --- /dev/null +++ b/mm_agents/task_adapter/semantic_sam/tasks/automatic_mask_generator.py @@ -0,0 +1,393 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
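The semantic_sam tasks package above re-exports inference_semsam_m2m_auto, which SoM_agent.inference() calls on the Semantic-SAM path. A minimal sketch of that call, copied in shape from SoM_agent.py (model_semsam and image are assumed to be the Semantic-SAM model and PIL screenshot built earlier in this patch):

```python
from task_adapter.semantic_sam.tasks import inference_semsam_m2m_auto

level = [3]                                    # granularity levels 1-6; prompt_switch() below maps them internally
text, text_part, text_thresh = '', '', '0.0'   # text prompts are unused on this path
text_size, hole_scale, island_scale = 1280, 100, 100

output, mask = inference_semsam_m2m_auto(
    model_semsam, image, level, text, text_part, text_thresh,
    text_size, hole_scale, island_scale, False,            # semantic=False
    label_mode='1', alpha=0.1, anno_mode=['Mark', 'Box'],
)
```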
+ +import numpy as np +import torch +from torchvision.ops.boxes import batched_nms, box_area # type: ignore + +from typing import Any, Dict, List, Optional, Tuple +# from +# from .modeling import Sam +# from .predictor import SamPredictor +from semantic_sam.utils.sam_utils.amg import ( + MaskData, + area_from_rle, + batch_iterator, + batched_mask_to_box, + box_xyxy_to_xywh, + build_all_layer_point_grids, + calculate_stability_score, + coco_encode_rle, + generate_crop_boxes, + is_box_near_crop_edge, + mask_to_rle_pytorch, + remove_small_regions, + rle_to_mask, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, +) + + +def prompt_switch(p): + p = int(p) + if p == 1: + return 3 + if p == 2: + return 2 + if p == 3: + return 0 + if p == 4: + return 4 + if p == 5: + return 1 + if p == 6: + return 5 + else: + raise NotImplementedError + + +class SemanticSamAutomaticMaskGenerator: + def __init__( + self, + model, + points_per_side: Optional[int] = 32, + points_per_batch: int = 200, + pred_iou_thresh: float = 0.88, + stability_score_thresh: float = 0.92, + stability_score_offset: float = 1.0, + box_nms_thresh: float = 0.7, + crop_n_layers: int = 0, + crop_nms_thresh: float = 0.7, + crop_overlap_ratio: float = 512 / 1500, + crop_n_points_downscale_factor: int = 1, + point_grids: Optional[List[np.ndarray]] = None, + min_mask_region_area: int = 10, + output_mode: str = "binary_mask", + level: list = [1, 2, 3, 4, 5, 6], + ) -> None: + """ + Using a SAM model, generates masks for the entire image. + Generates a grid of point prompts over the image, then filters + low quality and duplicate masks. The default settings are chosen + for SAM with a ViT-H backbone. + + Arguments: + model (Sam): The SAM model to use for mask prediction. + points_per_side (int or None): The number of points to be sampled + along one side of the image. The total number of points is + points_per_side**2. If None, 'point_grids' must provide explicit + point sampling. + points_per_batch (int): Sets the number of points run simultaneously + by the model. Higher numbers may be faster but use more GPU memory. + pred_iou_thresh (float): A filtering threshold in [0,1], using the + model's predicted mask quality. + stability_score_thresh (float): A filtering threshold in [0,1], using + the stability of the mask under changes to the cutoff used to binarize + the model's mask predictions. + stability_score_offset (float): The amount to shift the cutoff when + calculated the stability score. + box_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks. + crops_n_layers (int): If >0, mask prediction will be run again on + crops of the image. Sets the number of layers to run, where each + layer has 2**i_layer number of image crops. + crops_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks between different crops. + crop_overlap_ratio (float): Sets the degree to which crops overlap. + In the first crop layer, crops will overlap by this fraction of + the image length. Later layers with more crops scale down this overlap. + crop_n_points_downscale_factor (int): The number of points-per-side + sampled in layer n is scaled down by crop_n_points_downscale_factor**n. + point_grids (list(np.ndarray) or None): A list over explicit grids + of points used for sampling, normalized to [0,1]. The nth grid in the + list is used in the nth crop layer. Exclusive with points_per_side. 
+ min_mask_region_area (int): If >0, postprocessing will be applied + to remove disconnected regions and holes in masks with area smaller + than min_mask_region_area. Requires opencv. + output_mode (str): The form masks are returned in. Can be 'binary_mask', + 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. + For large resolutions, 'binary_mask' may consume large amounts of + memory. + """ + self.level = [prompt_switch(l) for l in level] + assert (points_per_side is None) != ( + point_grids is None + ), "Exactly one of points_per_side or point_grid must be provided." + if points_per_side is not None: + self.point_grids = build_all_layer_point_grids( + points_per_side, + crop_n_layers, + crop_n_points_downscale_factor, + ) + elif point_grids is not None: + self.point_grids = point_grids + else: + raise ValueError("Can't have both points_per_side and point_grid be None.") + + assert output_mode in [ + "binary_mask", + "uncompressed_rle", + "coco_rle", + ], f"Unknown output_mode {output_mode}." + if output_mode == "coco_rle": + from pycocotools import mask as mask_utils # type: ignore # noqa: F401 + + if min_mask_region_area > 0: + import cv2 # type: ignore # noqa: F401 + + self.predictor = model + self.points_per_batch = points_per_batch + self.pred_iou_thresh = pred_iou_thresh + self.stability_score_thresh = stability_score_thresh + self.stability_score_offset = stability_score_offset + self.box_nms_thresh = box_nms_thresh + self.crop_n_layers = crop_n_layers + self.crop_nms_thresh = crop_nms_thresh + self.crop_overlap_ratio = crop_overlap_ratio + self.crop_n_points_downscale_factor = crop_n_points_downscale_factor + self.min_mask_region_area = min_mask_region_area + self.output_mode = output_mode + + @torch.no_grad() + def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: + """ + Generates masks for the given image. + + Arguments: + image (np.ndarray): The image to generate masks for, in HWC uint8 format. + + Returns: + list(dict(str, any)): A list over records for masks. Each record is + a dict containing the following keys: + segmentation (dict(str, any) or np.ndarray): The mask. If + output_mode='binary_mask', is an array of shape HW. Otherwise, + is a dictionary containing the RLE. + bbox (list(float)): The box around the mask, in XYWH format. + area (int): The area in pixels of the mask. + predicted_iou (float): The model's own prediction of the mask's + quality. This is filtered by the pred_iou_thresh parameter. + point_coords (list(list(float))): The point coordinates input + to the model to generate this mask. + stability_score (float): A measure of the mask's quality. This + is filtered on using the stability_score_thresh parameter. + crop_box (list(float)): The crop of the image used to generate + the mask, given in XYWH format. 
+ """ + + # Generate masks + mask_data = self._generate_masks(image) + + # Filter small disconnected regions and holes in masks + if self.min_mask_region_area > 0: + mask_data = self.postprocess_small_regions( + mask_data, + self.min_mask_region_area, + max(self.box_nms_thresh, self.crop_nms_thresh), + ) + # Encode masks + if self.output_mode == "coco_rle": + mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] + elif self.output_mode == "binary_mask": + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + else: + mask_data["segmentations"] = mask_data["rles"] + + # Write mask records + curr_anns = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + curr_anns.append(ann) + + return curr_anns + + def _generate_masks(self, image: np.ndarray) -> MaskData: + orig_size = image.shape[-2:] + crop_boxes, layer_idxs = generate_crop_boxes( + orig_size, self.crop_n_layers, self.crop_overlap_ratio + ) + + # Iterate over image crops + assert len(crop_boxes)==1 + data = MaskData() + # import ipdb; ipdb.set_trace() + for crop_box, layer_idx in zip(crop_boxes, layer_idxs): + crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) + + data.cat(crop_data) + # import ipdb; ipdb.set_trace() + # Remove duplicate masks between crops + if len(crop_boxes) > 1: + # Prefer masks from smaller crops + scores = 1 / box_area(data["crop_boxes"]) + scores = scores.to(data["boxes"].device) + keep_by_nms = batched_nms( + data["boxes"].float(), + scores, + torch.zeros(len(data["boxes"])), # categories + iou_threshold=self.crop_nms_thresh, + ) + data.filter(keep_by_nms) + + data.to_numpy() + return data + + def _process_crop( + self, + image: np.ndarray, + crop_box: List[int], + crop_layer_idx: int, + orig_size: Tuple[int, ...], + ) -> MaskData: + # Crop the image and calculate embeddings + x0, y0, x1, y1 = crop_box + cropped_im = image#[y0:y1, x0:x1, :] + cropped_im_size = cropped_im.shape[-2:] + # self.predictor.set_image(cropped_im) + + # Get points for this crop + points_scale = np.array(cropped_im_size)[None, ::-1] + points_for_image = self.point_grids[crop_layer_idx] #* points_scale + + # Generate masks for this crop in batches + data = MaskData() + self.enc_features=None + # import ipdb; ipdb.set_trace() + for (points,) in batch_iterator(self.points_per_batch, points_for_image): + batch_data = self._process_batch(cropped_im,points, cropped_im_size, crop_box, orig_size) + data.cat(batch_data) + del batch_data + + keep_by_nms = batched_nms( + data["boxes"].float(), + data["iou_preds"], + torch.zeros(len(data["boxes"])), # categories + iou_threshold=self.box_nms_thresh, + ) + # import ipdb; ipdb.set_trace() + data.filter(keep_by_nms) + # import ipdb; ipdb.set_trace() + # Return to the original image frame + data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) + data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) + + return data + + def _process_batch( + self, + images, + points: np.ndarray, + im_size: Tuple[int, ...], + crop_box: List[int], + orig_size: Tuple[int, ...], + ) -> MaskData: + orig_h, orig_w 
= orig_size + + data = {"image": images, "height": orig_h, "width": orig_w} + points=torch.tensor(points,dtype=torch.float).to(images.device) + points = torch.cat([points, points.new_tensor([[0.005, 0.005]]).repeat(len(points), 1)], dim=-1) + data['targets'] = [dict()] + data['targets'][0]['points']=points + data['targets'][0]['pb']=points.new_tensor([0.]*len(points)) + batch_inputs = [data] + if self.enc_features is None: + masks, iou_preds,mask_features,multi_scale_features= self.predictor.model.evaluate_demo(batch_inputs,None,None,return_features=True, level=self.level) + self.enc_features=(mask_features,multi_scale_features) + else: + masks, iou_preds= self.predictor.model.evaluate_demo(batch_inputs,None,None,self.enc_features[0],self.enc_features[1], level=self.level) + + data = MaskData( + masks=masks, + iou_preds=iou_preds.flatten(), + points=torch.as_tensor(points[:,None].repeat(1,len(self.level), 1).view(-1,4)), + ) + del masks + # Filter by predicted IoU + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + # Calculate stability score + data["stability_score"] = calculate_stability_score( + data["masks"], 0.0, self.stability_score_offset + ) + # if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + + # Threshold masks and calculate boxes + data["masks"] = data["masks"] > 0.0 + data["boxes"] = batched_mask_to_box(data["masks"]) + + # Filter boxes that touch crop boundaries + keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) + if not torch.all(keep_mask): + data.filter(keep_mask) + + # Compress to RLE + data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) + data["rles"] = mask_to_rle_pytorch(data["masks"]) + del data["masks"] + + return data + + @staticmethod + def postprocess_small_regions( + mask_data: MaskData, min_area: int, nms_thresh: float + ) -> MaskData: + """ + Removes small disconnected regions and holes in masks, then reruns + box NMS to remove any new duplicates. + + Edits mask_data in place. + + Requires open-cv as a dependency. 
+ """ + if len(mask_data["rles"]) == 0: + return mask_data + + # Filter small disconnected regions and holes + new_masks = [] + scores = [] + for rle in mask_data["rles"]: + mask = rle_to_mask(rle) + + mask, changed = remove_small_regions(mask, min_area, mode="holes") + unchanged = not changed + mask, changed = remove_small_regions(mask, min_area, mode="islands") + unchanged = unchanged and not changed + + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) + # Give score=0 to changed masks and score=1 to unchanged masks + # so NMS will prefer ones that didn't need postprocessing + scores.append(float(unchanged)) + + # Recalculate boxes and remove any new duplicates + masks = torch.cat(new_masks, dim=0) + boxes = batched_mask_to_box(masks) + keep_by_nms = batched_nms( + boxes.float(), + torch.as_tensor(scores), + torch.zeros(len(boxes)), # categories + iou_threshold=nms_thresh, + ) + + # Only recalculate RLEs for masks that have changed + for i_mask in keep_by_nms: + if scores[i_mask] == 0.0: + mask_torch = masks[i_mask].unsqueeze(0) + mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0] + mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly + mask_data.filter(keep_by_nms) + + return mask_data \ No newline at end of file diff --git a/mm_agents/task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py b/mm_agents/task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py new file mode 100644 index 0000000..a939a3c --- /dev/null +++ b/mm_agents/task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py @@ -0,0 +1,108 @@ +# -------------------------------------------------------- +# Semantic-SAM: Segment and Recognize Anything at Any Granularity +# Copyright (c) 2023 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Hao Zhang (hzhangcx@connect.ust.hk) +# -------------------------------------------------------- + +import torch +import numpy as np +from torchvision import transforms +from task_adapter.utils.visualizer import Visualizer +from typing import Tuple +from PIL import Image +from detectron2.data import MetadataCatalog +import matplotlib.pyplot as plt +import cv2 +import io +from .automatic_mask_generator import SemanticSamAutomaticMaskGenerator +metadata = MetadataCatalog.get('coco_2017_train_panoptic') + +def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image) + + image_ori = np.asarray(image_ori) + images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() + + mask_generator = SemanticSamAutomaticMaskGenerator(model,points_per_side=32, + pred_iou_thresh=0.88, + stability_score_thresh=0.92, + min_mask_region_area=10, + level=level, + ) + outputs = mask_generator.generate(images) + + from task_adapter.utils.visualizer import Visualizer + visual = Visualizer(image_ori, metadata=metadata) + sorted_anns = sorted(outputs, key=(lambda x: x['area']), reverse=True) + label = 1 + # for ann in sorted_anns: + # mask = ann['segmentation'] + # color_mask = np.random.random((1, 3)).tolist()[0] + # # color_mask = [int(c*255) for c in color_mask] + # demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + # label += 1 + # im = 
demo.get_image() + + mask_map = np.zeros(image_ori.shape, dtype=np.uint8) + for i, ann in enumerate(sorted_anns): + mask = ann['segmentation'] + color_mask = np.random.random((1, 3)).tolist()[0] + # color_mask = [int(c*255) for c in color_mask] + demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + # assign the mask to the mask_map + mask_map[mask == 1] = label + label += 1 + im = demo.get_image() + # fig=plt.figure(figsize=(10, 10)) + # plt.imshow(image_ori) + # show_anns(outputs) + # fig.canvas.draw() + # im=Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb()) + return im, sorted_anns + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + +def show_anns(anns): + if len(anns) == 0: + return + sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) + ax = plt.gca() + ax.set_autoscale_on(False) + polygons = [] + color = [] + for ann in sorted_anns: + m = ann['segmentation'] + img = np.ones((m.shape[0], m.shape[1], 3)) + color_mask = np.random.random((1, 3)).tolist()[0] + for i in range(3): + img[:,:,i] = color_mask[i] + ax.imshow(np.dstack((img, m*0.35))) \ No newline at end of file diff --git a/mm_agents/task_adapter/semantic_sam/tasks/interactive_idino_1o1_box.py b/mm_agents/task_adapter/semantic_sam/tasks/interactive_idino_1o1_box.py new file mode 100644 index 0000000..ccfe774 --- /dev/null +++ b/mm_agents/task_adapter/semantic_sam/tasks/interactive_idino_1o1_box.py @@ -0,0 +1,144 @@ +# -------------------------------------------------------- +# Semantic-SAM: Segment and Recognize Anything at Any Granularity +# Copyright (c) 2023 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Hao Zhang (hzhangcx@connect.ust.hk) +# -------------------------------------------------------- + +import torch +import numpy as np +from torchvision import transforms +from task_adapter.utils.visualizer import Visualizer +from typing import Tuple +from PIL import Image +from detectron2.data import MetadataCatalog +from detectron2.structures import BitMasks +from semantic_sam.utils import box_ops + +metadata = MetadataCatalog.get('coco_2017_train_panoptic') + +def interactive_infer_image_box(model, image,all_classes,all_parts, thresh,text_size,hole_scale,island_scale,semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image['image']) + mask_ori = transform1(image['mask']) + width = 
image_ori.size[0] + height = image_ori.size[1] + image_ori = np.asarray(image_ori) + images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() + all_classes, all_parts=all_classes.strip().strip("\"[]").split(':'),all_parts.strip().strip("\"[]").split(':') + + + data = {"image": images, "height": height, "width": width} + + mask_ori = np.asarray(mask_ori)[:,:,0:1].copy() + mask_ori = torch.from_numpy(mask_ori).permute(2,0,1)[0] + flaten_mask = mask_ori.unsqueeze(0) + # import ipdb; ipdb.set_trace() + points=mask_ori.nonzero().float().to(images.device) + if len(points)==0: + point_=point=points.new_tensor([[0.5,0.5,0.5,0.5]]) + else: + mean_point=points.mean(0)[None] + box_xyxy = BitMasks(flaten_mask > 0).get_bounding_boxes().tensor + h = mask_ori.shape[0] + w = mask_ori.shape[1] + box_xywh = (box_ops.box_xyxy_to_cxcywh(box_xyxy) / torch.as_tensor([w, h, w, h])).cuda() + + # point_=points.mean(0)[None] + # point=point_.clone() + # point[0, 0] = point_[0, 0] / mask_ori.shape[0] + # point[0, 1] = point_[0, 1] / mask_ori.shape[1] + # point = point[:, [1, 0]] + point=box_xywh + data['targets'] = [dict()] + data['targets'][0]['points']=point + data['targets'][0]['pb']=point.new_tensor([1.]) + + + batch_inputs = [data] + masks,ious = model.model.evaluate_demo(batch_inputs,all_classes,all_parts, task='demo_box') + + pred_masks_poses = masks + reses=[] + ious=ious[0,0] + ids=torch.argsort(ious,descending=True) + + text_res='' + try: + thresh=float(thresh) + except Exception: + thresh=0.0 + mask_ls=[] + ious_res=[] + areas=[] + for i,(pred_masks_pos,iou) in enumerate(zip(pred_masks_poses[ids],ious[ids])): + iou=round(float(iou),2) + texts=f'{iou}' + mask=(pred_masks_pos>0.0).cpu().numpy() + area=mask.sum() + conti=False + if iou0.95: + conti=True + break + if i == len(pred_masks_poses[ids])-1 and mask_ls==[]: + conti=False + if conti: + continue + ious_res.append(iou) + mask_ls.append(mask) + areas.append(area) + mask,_=remove_small_regions(mask,int(hole_scale),mode="holes") + mask,_=remove_small_regions(mask,int(island_scale),mode="islands") + mask=(mask).astype(np.float) + out_txt = texts + visual = Visualizer(image_ori, metadata=metadata) + color=[0.,0.,1.0] + demo = visual.draw_binary_mask(mask, color=color, text=texts) + demo = visual.draw_box(box_xyxy[0]) + res = demo.get_image() + # point_x0=max(0,int(point_[0, 1])-3) + # point_x1=min(mask_ori.shape[1],int(point_[0, 1])+3) + # point_y0 = max(0, int(point_[0, 0]) - 3) + # point_y1 = min(mask_ori.shape[0], int(point_[0, 0]) + 3) + # res[point_y0:point_y1,point_x0:point_x1,0]=255 + # res[point_y0:point_y1,point_x0:point_x1,1]=0 + # res[point_y0:point_y1,point_x0:point_x1,2]=0 + reses.append(Image.fromarray(res)) + text_res=text_res+';'+out_txt + ids=list(torch.argsort(torch.tensor(areas),descending=False)) + ids = [int(i) for i in ids] + + torch.cuda.empty_cache() + + return reses,[reses[i] for i in ids] + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. 
+ """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True \ No newline at end of file diff --git a/mm_agents/task_adapter/semantic_sam/tasks/interactive_idino_m2m.py b/mm_agents/task_adapter/semantic_sam/tasks/interactive_idino_m2m.py new file mode 100644 index 0000000..93775c3 --- /dev/null +++ b/mm_agents/task_adapter/semantic_sam/tasks/interactive_idino_m2m.py @@ -0,0 +1,322 @@ +# -------------------------------------------------------- +# Semantic-SAM: Segment and Recognize Anything at Any Granularity +# Copyright (c) 2023 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Hao Zhang (hzhangcx@connect.ust.hk) +# -------------------------------------------------------- + +import torch +import numpy as np +from torchvision import transforms +from task_adapter.utils.visualizer import Visualizer +from typing import Tuple +from PIL import Image +from detectron2.data import MetadataCatalog +metadata = MetadataCatalog.get('coco_2017_train_panoptic') + +def interactive_infer_image(model, image,all_classes,all_parts, thresh,text_size,hole_scale,island_scale,semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image['image']) + mask_ori = transform1(image['mask']) + width = image_ori.size[0] + height = image_ori.size[1] + image_ori = np.asarray(image_ori) + images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() + all_classes, all_parts=all_classes.strip().strip("\"[]").split(':'),all_parts.strip().strip("\"[]").split(':') + + + data = {"image": images, "height": height, "width": width} + + mask_ori = np.asarray(mask_ori)[:,:,0:1].copy() + mask_ori = torch.from_numpy(mask_ori).permute(2,0,1)[0] + points=mask_ori.nonzero().float().to(images.device) + if len(points)==0: + point_=point=points.new_tensor([[0.5,0.5,0.006,0.006]]) + else: + point_=points.mean(0)[None] + point=point_.clone() + point[0, 0] = point_[0, 0] / mask_ori.shape[0] + point[0, 1] = point_[0, 1] / mask_ori.shape[1] + point = point[:, [1, 0]] + point=torch.cat([point,points.new_tensor([[0.005,0.005]])],dim=-1) + data['targets'] = [dict()] + data['targets'][0]['points']=point + data['targets'][0]['pb']=point.new_tensor([0.]) + + + batch_inputs = [data] + masks,ious = model.model.evaluate_demo(batch_inputs,all_classes,all_parts) + + pred_masks_poses = masks + reses=[] + ious=ious[0,0] + ids=torch.argsort(ious,descending=True) + + text_res='' + try: + thresh=float(thresh) + except Exception: + thresh=0.0 + mask_ls=[] + ious_res=[] + areas=[] + for i,(pred_masks_pos,iou) in enumerate(zip(pred_masks_poses[ids],ious[ids])): + iou=round(float(iou),2) + texts=f'{iou}' + mask=(pred_masks_pos>0.0).cpu().numpy() + 
area=mask.sum() + conti=False + if iou0.95: + conti=True + break + if i == len(pred_masks_poses[ids])-1 and mask_ls==[]: + conti=False + if conti: + continue + ious_res.append(iou) + mask_ls.append(mask) + areas.append(area) + mask,_=remove_small_regions(mask,int(hole_scale),mode="holes") + mask,_=remove_small_regions(mask,int(island_scale),mode="islands") + mask=(mask).astype(np.float) + out_txt = texts + visual = Visualizer(image_ori, metadata=metadata) + color=[0.,0.,1.0] + # demo = visual.draw_binary_mask(mask, color=color, text=texts) + demo = visual.draw_binary_mask_with_number(mask, text=str(label), label_mode=label_mode, alpha=alpha, anno_mode=anno_mode) + res = demo.get_image() + point_x0=max(0,int(point_[0, 1])-3) + point_x1=min(mask_ori.shape[1],int(point_[0, 1])+3) + point_y0 = max(0, int(point_[0, 0]) - 3) + point_y1 = min(mask_ori.shape[0], int(point_[0, 0]) + 3) + # res[point_y0:point_y1,point_x0:point_x1,0]=255 + # res[point_y0:point_y1,point_x0:point_x1,1]=0 + # res[point_y0:point_y1,point_x0:point_x1,2]=0 + reses.append(Image.fromarray(res)) + text_res=text_res+';'+out_txt + ids=list(torch.argsort(torch.tensor(areas),descending=False)) + ids = [int(i) for i in ids] + + torch.cuda.empty_cache() + + return reses,[reses[i] for i in ids] + +def interactive_infer_image_3l(model, image,all_classes,all_parts, thresh,text_size,hole_scale,island_scale,semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image['image']) + mask_ori = transform1(image['mask']) + width = image_ori.size[0] + height = image_ori.size[1] + image_ori = np.asarray(image_ori) + images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() + all_classes, all_parts=all_classes.strip().strip("\"[]").split(':'),all_parts.strip().strip("\"[]").split(':') + + + data = {"image": images, "height": height, "width": width} + + mask_ori = np.asarray(mask_ori)[:,:,0:1].copy() + mask_ori = torch.from_numpy(mask_ori).permute(2,0,1)[0] + points=mask_ori.nonzero().float().to(images.device) + if len(points)==0: + point_=point=points.new_tensor([[0.5,0.5,0.006,0.006]]) + else: + point_=points.mean(0)[None] + point=point_.clone() + point[0, 0] = point_[0, 0] / mask_ori.shape[0] + point[0, 1] = point_[0, 1] / mask_ori.shape[1] + point = point[:, [1, 0]] + point=torch.cat([point,points.new_tensor([[0.005,0.005]])],dim=-1) + data['targets'] = [dict()] + data['targets'][0]['points']=point + data['targets'][0]['pb']=point.new_tensor([0.]) + + + batch_inputs = [data] + masks, ious, pred_class, pred_class_score = model.model.evaluate_demo(batch_inputs,all_classes,all_parts, level=[0,1,2]) + + pred_masks_poses = masks + reses=[] + ious=ious[0,0] + ids=torch.argsort(ious,descending=True) + + text_res='' + try: + thresh=float(thresh) + except Exception: + thresh=0.0 + mask_ls=[] + ious_res=[] + areas=[] + new_pred_class = [] + new_pred_class_score = [] + for i in ids: + new_pred_class_score.append(pred_class_score[i]) + new_pred_class.append(pred_class[i]) + # import ipdb; ipdb.set_trace() + for i,(pred_masks_pos,iou, cls_name, cls_score) in enumerate(zip(pred_masks_poses[ids],ious[ids], new_pred_class, new_pred_class_score)): + iou=round(float(iou),2) + texts=f'{iou}_{cls_name}_{cls_score}' + mask=(pred_masks_pos>0.0).cpu().numpy() + area=mask.sum() + conti=False + if iou0.95: + conti=True + break + if i == len(pred_masks_poses[ids])-1 and mask_ls==[]: + conti=False + if 
conti: + continue + ious_res.append(iou) + mask_ls.append(mask) + areas.append(area) + mask,_=remove_small_regions(mask,int(hole_scale),mode="holes") + mask,_=remove_small_regions(mask,int(island_scale),mode="islands") + mask=(mask).astype(np.float) + out_txt = texts + visual = Visualizer(image_ori, metadata=metadata) + color=[0.,0.,1.0] + demo = visual.draw_binary_mask(mask, color=color, text=texts) + res = demo.get_image() + point_x0=max(0,int(point_[0, 1])-3) + point_x1=min(mask_ori.shape[1],int(point_[0, 1])+3) + point_y0 = max(0, int(point_[0, 0]) - 3) + point_y1 = min(mask_ori.shape[0], int(point_[0, 0]) + 3) + res[point_y0:point_y1,point_x0:point_x1,0]=255 + res[point_y0:point_y1,point_x0:point_x1,1]=0 + res[point_y0:point_y1,point_x0:point_x1,2]=0 + reses.append(Image.fromarray(res)) + text_res=text_res+';'+out_txt + ids=list(torch.argsort(torch.tensor(areas),descending=False)) + ids = [int(i) for i in ids] + + torch.cuda.empty_cache() + + return reses,[reses[i] for i in ids] + +def interactive_infer_image_semantic(model, image,all_classes,all_parts, thresh,text_size,hole_scale,island_scale,semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None): + t = [] + t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) + transform1 = transforms.Compose(t) + image_ori = transform1(image['image']) + mask_ori = transform1(image['mask']) + width = image_ori.size[0] + height = image_ori.size[1] + image_ori = np.asarray(image_ori) + images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() + all_classes, all_parts=all_classes.strip().strip("\"[]").split(':'),all_parts.strip().strip("\"[]").split(':') + + + data = {"image": images, "height": height, "width": width} + + mask_ori = np.asarray(mask_ori)[:,:,0:1].copy() + mask_ori = torch.from_numpy(mask_ori).permute(2,0,1)[0] + points=mask_ori.nonzero().float().to(images.device) + if len(points)==0: + point_=point=points.new_tensor([[0.5,0.5,0.006,0.006]]) + else: + point_=points.mean(0)[None] + point=point_.clone() + point[0, 0] = point_[0, 0] / mask_ori.shape[0] + point[0, 1] = point_[0, 1] / mask_ori.shape[1] + point = point[:, [1, 0]] + point=torch.cat([point,points.new_tensor([[0.005,0.005]])],dim=-1) + data['targets'] = [dict()] + data['targets'][0]['points']=point + data['targets'][0]['pb']=point.new_tensor([0.]) + data['targets'][0]['pb']=point.new_tensor([1.]) + + + batch_inputs = [data] + masks,ious = model.model.evaluate_demo(batch_inputs,all_classes,all_parts) + + pred_masks_poses = masks + reses=[] + ious=ious[0,0] + ids=torch.argsort(ious,descending=True) + + text_res='' + try: + thresh=float(thresh) + except Exception: + thresh=0.0 + mask_ls=[] + ious_res=[] + areas=[] + for i,(pred_masks_pos,iou) in enumerate(zip(pred_masks_poses[ids],ious[ids])): + iou=round(float(iou),2) + texts=f'{iou}' + mask=(pred_masks_pos>0.0).cpu().numpy() + area=mask.sum() + conti=False + if iou0.95: + conti=True + break + if i == len(pred_masks_poses[ids])-1 and mask_ls==[]: + conti=False + if conti: + continue + ious_res.append(iou) + mask_ls.append(mask) + areas.append(area) + mask,_=remove_small_regions(mask,int(hole_scale),mode="holes") + mask,_=remove_small_regions(mask,int(island_scale),mode="islands") + mask=(mask).astype(np.float) + out_txt = texts + visual = Visualizer(image_ori, metadata=metadata) + color=[0.,0.,1.0] + demo = visual.draw_binary_mask(mask, color=color, text=texts) + res = demo.get_image() + point_x0=max(0,int(point_[0, 1])-3) + point_x1=min(mask_ori.shape[1],int(point_[0, 1])+3) + point_y0 = 
max(0, int(point_[0, 0]) - 3) + point_y1 = min(mask_ori.shape[0], int(point_[0, 0]) + 3) + res[point_y0:point_y1,point_x0:point_x1,0]=255 + res[point_y0:point_y1,point_x0:point_x1,1]=0 + res[point_y0:point_y1,point_x0:point_x1,2]=0 + reses.append(Image.fromarray(res)) + text_res=text_res+';'+out_txt + ids=list(torch.argsort(torch.tensor(areas),descending=False)) + ids = [int(i) for i in ids] + + torch.cuda.empty_cache() + + return reses,[reses[i] for i in ids] + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True \ No newline at end of file diff --git a/mm_agents/task_adapter/semantic_sam/tasks/interactive_predictor.py b/mm_agents/task_adapter/semantic_sam/tasks/interactive_predictor.py new file mode 100644 index 0000000..70252a3 --- /dev/null +++ b/mm_agents/task_adapter/semantic_sam/tasks/interactive_predictor.py @@ -0,0 +1,139 @@ +import torch +import numpy as np +from torchvision import transforms +from task_adapter.utils.visualizer import Visualizer +from typing import Tuple +from PIL import Image +from detectron2.data import MetadataCatalog +metadata = MetadataCatalog.get('coco_2017_train_panoptic') + + +class SemanticSAMPredictor: + def __init__(self, model, thresh=0.5, text_size=640, hole_scale=100, island_scale=100): + """ + thresh: iou thresh to filter low confidence objects + text_size: resize the input image short edge for the model to process + hole_scale: fill in small holes as in SAM + island_scale: remove small regions as in SAM + """ + self.model = model + self.thresh = thresh + self.text_size = hole_scale + self.hole_scale = hole_scale + self.island_scale = island_scale + self.point = None + + def predict(self, image_ori, image, point=None): + """ + produce up to 6 prediction results for each click + """ + width = image_ori.shape[0] + height = image_ori.shape[1] + + data = {"image": image, "height": height, "width": width} + # import ipdb; ipdb.set_trace() + if point is None: + point = torch.tensor([[0.5, 0.5, 0.006, 0.006]]).cuda() + else: + point = torch.tensor(point).cuda() + point_ = point + point = point_.clone() + point[0, 0] = point_[0, 0] + point[0, 1] = point_[0, 1] + # point = point[:, [1, 0]] + point = torch.cat([point, point.new_tensor([[0.005, 0.005]])], dim=-1) + + self.point = point[:, :2].clone()*(torch.tensor([width, height]).to(point)) + + data['targets'] = [dict()] + data['targets'][0]['points'] = point + data['targets'][0]['pb'] = point.new_tensor([0.]) + + batch_inputs = [data] + masks, ious = self.model.model.evaluate_demo(batch_inputs) + + return masks, ious + + def process_multi_mask(self, masks, ious, image_ori): + pred_masks_poses = 
masks + reses = [] + ious = ious[0, 0] + ids = torch.argsort(ious, descending=True) + + text_res = '' + mask_ls = [] + ious_res = [] + areas = [] + for i, (pred_masks_pos, iou) in enumerate(zip(pred_masks_poses[ids], ious[ids])): + iou = round(float(iou), 2) + texts = f'{iou}' + mask = (pred_masks_pos > 0.0).cpu().numpy() + area = mask.sum() + conti = False + if iou < self.thresh: + conti = True + for m in mask_ls: + if np.logical_and(mask, m).sum() / np.logical_or(mask, m).sum() > 0.95: + conti = True + break + if i == len(pred_masks_poses[ids]) - 1 and mask_ls == []: + conti = False + if conti: + continue + ious_res.append(iou) + mask_ls.append(mask) + areas.append(area) + mask, _ = self.remove_small_regions(mask, int(self.hole_scale), mode="holes") + mask, _ = self.remove_small_regions(mask, int(self.island_scale), mode="islands") + mask = (mask).astype(np.float) + out_txt = texts + visual = Visualizer(image_ori, metadata=metadata) + color = [0., 0., 1.0] + demo = visual.draw_binary_mask(mask, color=color, text=texts) + res = demo.get_image() + point_x0 = max(0, int(self.point[0, 0]) - 3) + point_x1 = min(image_ori.shape[1], int(self.point[0, 0]) + 3) + point_y0 = max(0, int(self.point[0, 1]) - 3) + point_y1 = min(image_ori.shape[0], int(self.point[0, 1]) + 3) + res[point_y0:point_y1, point_x0:point_x1, 0] = 255 + res[point_y0:point_y1, point_x0:point_x1, 1] = 0 + res[point_y0:point_y1, point_x0:point_x1, 2] = 0 + reses.append(Image.fromarray(res)) + text_res = text_res + ';' + out_txt + ids = list(torch.argsort(torch.tensor(areas), descending=False)) + ids = [int(i) for i in ids] + + torch.cuda.empty_cache() + + return reses, [reses[i] for i in ids] + + def predict_masks(self, image_ori, image, point=None): + masks, ious = self.predict(image_ori, image, point) + return self.process_multi_mask(masks, ious, image_ori) + + @staticmethod + def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str + ) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True diff --git a/mm_agents/task_adapter/utils/visualizer.py b/mm_agents/task_adapter/utils/visualizer.py new file mode 100644 index 0000000..bd78a98 --- /dev/null +++ b/mm_agents/task_adapter/utils/visualizer.py @@ -0,0 +1,1405 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
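For context, a usage sketch (not part of the patch) for the SemanticSAMPredictor defined above; `model_semsam` stands in for a Semantic-SAM BaseModel that has already been built and moved to the GPU, and the image path and click coordinates are placeholders. The click is a single (x, y) point normalized to [0, 1]; predict() pads it into the point/box prompt format the model expects and returns up to six candidate masks, which process_multi_mask() then filters and renders (predict_masks() wraps both steps).

```
import numpy as np
import torch
from PIL import Image
from task_adapter.semantic_sam.tasks import SemanticSAMPredictor

image_pil = Image.open("screenshot.png").convert("RGB")    # placeholder path
image_ori = np.asarray(image_pil)                           # HWC uint8, RGB
image = torch.from_numpy(image_ori.copy()).permute(2, 0, 1).cuda()

# model_semsam: an already-built Semantic-SAM BaseModel on CUDA
predictor = SemanticSAMPredictor(model_semsam, thresh=0.5,
                                 hole_scale=100, island_scale=100)
masks, ious = predictor.predict(image_ori, image, point=[[0.4, 0.6]])
overlays, overlays_by_area = predictor.process_multi_mask(masks, ious, image_ori)
```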
+import colorsys +import logging +import math +import numpy as np +from enum import Enum, unique +import cv2 +import matplotlib as mpl +import matplotlib.colors as mplc +import matplotlib.figure as mplfigure +import pycocotools.mask as mask_util +import torch +from matplotlib.backends.backend_agg import FigureCanvasAgg +from PIL import Image + +from detectron2.data import MetadataCatalog +from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes +from detectron2.utils.file_io import PathManager + +from detectron2.utils.colormap import random_color +import random + +logger = logging.getLogger(__name__) + +__all__ = ["ColorMode", "VisImage", "Visualizer"] + + +_SMALL_OBJECT_AREA_THRESH = 1000 +_LARGE_MASK_AREA_THRESH = 120000 +_OFF_WHITE = (1.0, 1.0, 240.0 / 255) +_BLACK = (0, 0, 0) +_RED = (1.0, 0, 0) + +_KEYPOINT_THRESHOLD = 0.05 + + +@unique +class ColorMode(Enum): + """ + Enum of different color modes to use for instance visualizations. + """ + + IMAGE = 0 + """ + Picks a random color for every instance and overlay segmentations with low opacity. + """ + SEGMENTATION = 1 + """ + Let instances of the same category have similar colors + (from metadata.thing_colors), and overlay them with + high opacity. This provides more attention on the quality of segmentation. + """ + IMAGE_BW = 2 + """ + Same as IMAGE, but convert all areas without masks to gray-scale. + Only available for drawing per-instance mask predictions. + """ + + +class GenericMask: + """ + Attribute: + polygons (list[ndarray]): list[ndarray]: polygons for this mask. + Each ndarray has format [x, y, x, y, ...] + mask (ndarray): a binary mask + """ + + def __init__(self, mask_or_polygons, height, width): + self._mask = self._polygons = self._has_holes = None + self.height = height + self.width = width + + m = mask_or_polygons + if isinstance(m, dict): + # RLEs + assert "counts" in m and "size" in m + if isinstance(m["counts"], list): # uncompressed RLEs + h, w = m["size"] + assert h == height and w == width + m = mask_util.frPyObjects(m, h, w) + self._mask = mask_util.decode(m)[:, :] + return + + if isinstance(m, list): # list[ndarray] + self._polygons = [np.asarray(x).reshape(-1) for x in m] + return + + if isinstance(m, np.ndarray): # assumed to be a binary mask + assert m.shape[1] != 2, m.shape + assert m.shape == ( + height, + width, + ), f"mask shape: {m.shape}, target dims: {height}, {width}" + self._mask = m.astype("uint8") + return + + raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) + + @property + def mask(self): + if self._mask is None: + self._mask = self.polygons_to_mask(self._polygons) + return self._mask + + @property + def polygons(self): + if self._polygons is None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + return self._polygons + + @property + def has_holes(self): + if self._has_holes is None: + if self._mask is not None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + else: + self._has_holes = False # if original format is polygon, does not have holes + return self._has_holes + + def mask_to_polygons(self, mask): + # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level + # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. + # Internal contours (holes) are placed in hierarchy-2. + # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. 
+ mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr + res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) + hierarchy = res[-1] + if hierarchy is None: # empty mask + return [], False + has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 + res = res[-2] + res = [x.flatten() for x in res] + # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. + # We add 0.5 to turn them into real-value coordinate space. A better solution + # would be to first +0.5 and then dilate the returned polygon by 0.5. + res = [x + 0.5 for x in res if len(x) >= 6] + return res, has_holes + + def polygons_to_mask(self, polygons): + rle = mask_util.frPyObjects(polygons, self.height, self.width) + rle = mask_util.merge(rle) + return mask_util.decode(rle)[:, :] + + def area(self): + return self.mask.sum() + + def bbox(self): + p = mask_util.frPyObjects(self.polygons, self.height, self.width) + p = mask_util.merge(p) + bbox = mask_util.toBbox(p) + bbox[2] += bbox[0] + bbox[3] += bbox[1] + return bbox + + +class _PanopticPrediction: + """ + Unify different panoptic annotation/prediction formats + """ + + def __init__(self, panoptic_seg, segments_info, metadata=None): + if segments_info is None: + assert metadata is not None + # If "segments_info" is None, we assume "panoptic_img" is a + # H*W int32 image storing the panoptic_id in the format of + # category_id * label_divisor + instance_id. We reserve -1 for + # VOID label. + label_divisor = metadata.label_divisor + segments_info = [] + for panoptic_label in np.unique(panoptic_seg.numpy()): + if panoptic_label == -1: + # VOID region. + continue + pred_class = panoptic_label // label_divisor + isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() + segments_info.append( + { + "id": int(panoptic_label), + "category_id": int(pred_class), + "isthing": bool(isthing), + } + ) + del metadata + + self._seg = panoptic_seg + + self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info + segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) + areas = areas.numpy() + sorted_idxs = np.argsort(-areas) + self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] + self._seg_ids = self._seg_ids.tolist() + for sid, area in zip(self._seg_ids, self._seg_areas): + if sid in self._sinfo: + self._sinfo[sid]["area"] = float(area) + + def non_empty_mask(self): + """ + Returns: + (H, W) array, a mask for all pixels that have a prediction + """ + empty_ids = [] + for id in self._seg_ids: + if id not in self._sinfo: + empty_ids.append(id) + if len(empty_ids) == 0: + return np.zeros(self._seg.shape, dtype=np.uint8) + assert ( + len(empty_ids) == 1 + ), ">1 ids corresponds to no labels. This is currently not supported" + return (self._seg != empty_ids[0]).numpy().astype(np.bool) + + def semantic_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or sinfo["isthing"]: + # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. 
+ continue + yield (self._seg == sid).numpy().astype(np.bool), sinfo + + def instance_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or not sinfo["isthing"]: + continue + mask = (self._seg == sid).numpy().astype(np.bool) + if mask.sum() > 0: + yield mask, sinfo + + +def _create_text_labels(classes, scores, class_names, is_crowd=None): + """ + Args: + classes (list[int] or None): + scores (list[float] or None): + class_names (list[str] or None): + is_crowd (list[bool] or None): + + Returns: + list[str] or None + """ + labels = None + if classes is not None: + if class_names is not None and len(class_names) > 0: + labels = [class_names[i] for i in classes] + else: + labels = [str(i) for i in classes] + if scores is not None: + if labels is None: + labels = ["{:.0f}%".format(s * 100) for s in scores] + else: + labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] + if labels is not None and is_crowd is not None: + labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] + return labels + + +class VisImage: + def __init__(self, img, scale=1.0): + """ + Args: + img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. + scale (float): scale the input image + """ + self.img = img + self.scale = scale + self.width, self.height = img.shape[1], img.shape[0] + self._setup_figure(img) + + def _setup_figure(self, img): + """ + Args: + Same as in :meth:`__init__()`. + + Returns: + fig (matplotlib.pyplot.figure): top level container for all the image plot elements. + ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. + """ + fig = mplfigure.Figure(frameon=False) + self.dpi = fig.get_dpi() + # add a small 1e-2 to avoid precision lost due to matplotlib's truncation + # (https://github.com/matplotlib/matplotlib/issues/15363) + fig.set_size_inches( + (self.width * self.scale + 1e-2) / self.dpi, + (self.height * self.scale + 1e-2) / self.dpi, + ) + self.canvas = FigureCanvasAgg(fig) + # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) + ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) + ax.axis("off") + self.fig = fig + self.ax = ax + self.reset_image(img) + + def reset_image(self, img): + """ + Args: + img: same as in __init__ + """ + img = img.astype("uint8") + self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") + + def save(self, filepath): + """ + Args: + filepath (str): a string that contains the absolute path, including the file name, where + the visualized image will be saved. + """ + self.fig.savefig(filepath) + + def get_image(self): + """ + Returns: + ndarray: + the visualized image of shape (H, W, 3) (RGB) in uint8 type. + The shape is scaled w.r.t the input image using the given `scale` argument. + """ + canvas = self.canvas + s, (width, height) = canvas.print_to_buffer() + # buf = io.BytesIO() # works for cairo backend + # canvas.print_rgba(buf) + # width, height = self.width, self.height + # s = buf.getvalue() + + buffer = np.frombuffer(s, dtype="uint8") + + img_rgba = buffer.reshape(height, width, 4) + rgb, alpha = np.split(img_rgba, [3], axis=2) + return rgb.astype("uint8") + + +class Visualizer: + """ + Visualizer that draws data about detection/segmentation on images. 
+ + It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` + that draw primitive objects to images, as well as high-level wrappers like + `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` + that draw composite data in some pre-defined style. + + Note that the exact visualization style for the high-level wrappers are subject to change. + Style such as color, opacity, label contents, visibility of labels, or even the visibility + of objects themselves (e.g. when the object is too small) may change according + to different heuristics, as long as the results still look visually reasonable. + + To obtain a consistent style, you can implement custom drawing functions with the + abovementioned primitive methods instead. If you need more customized visualization + styles, you can process the data yourself following their format documented in + tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not + intend to satisfy everyone's preference on drawing styles. + + This visualizer focuses on high rendering quality rather than performance. It is not + designed to be used for real-time applications. + """ + + # TODO implement a fast, rasterized version using OpenCV + + def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): + """ + Args: + img_rgb: a numpy array of shape (H, W, C), where H and W correspond to + the height and width of the image respectively. C is the number of + color channels. The image is required to be in RGB format since that + is a requirement of the Matplotlib library. The image is also expected + to be in the range [0, 255]. + metadata (Metadata): dataset metadata (e.g. class names and colors) + instance_mode (ColorMode): defines one of the pre-defined style for drawing + instances on an image. + """ + self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) + if metadata is None: + metadata = MetadataCatalog.get("__nonexist__") + self.metadata = metadata + self.output = VisImage(self.img, scale=scale) + self.cpu_device = torch.device("cpu") + + # too small texts are useless, therefore clamp to 9 + self._default_font_size = max( + np.sqrt(self.output.height * self.output.width) // 90, 10 // scale + ) + self._default_font_size = 18 + self._instance_mode = instance_mode + self.keypoint_threshold = _KEYPOINT_THRESHOLD + + import matplotlib.colors as mcolors + css4_colors = mcolors.CSS4_COLORS + self.color_proposals = [list(mcolors.hex2color(color)) for color in css4_colors.values()] + + def draw_instance_predictions(self, predictions): + """ + Draw instance-level prediction results on an image. + + Args: + predictions (Instances): the output of an instance detection/segmentation + model. Following fields will be used to draw: + "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). + + Returns: + output (VisImage): image object with visualizations. 
+ """ + boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None + labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + + keep = (scores > 0.5).cpu() + boxes = boxes[keep] + scores = scores[keep] + classes = np.array(classes) + classes = classes[np.array(keep)] + labels = np.array(labels) + labels = labels[np.array(keep)] + + if predictions.has("pred_masks"): + masks = np.asarray(predictions.pred_masks) + masks = masks[np.array(keep)] + masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] + else: + masks = None + + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): + # if self.metadata.get("thing_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes + ] + alpha = 0.4 + else: + colors = None + alpha = 0.4 + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.reset_image( + self._create_grayscale_image( + (predictions.pred_masks.any(dim=0) > 0).numpy() + if predictions.has("pred_masks") + else None + ) + ) + alpha = 0.3 + + self.overlay_instances( + masks=masks, + boxes=boxes, + labels=labels, + keypoints=keypoints, + assigned_colors=colors, + alpha=alpha, + ) + return self.output + + def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.7): + """ + Draw semantic segmentation predictions/labels. + + Args: + sem_seg (Tensor or ndarray): the segmentation of shape (H, W). + Each value is the integer label of the pixel. + area_threshold (int): segments with less than `area_threshold` are not drawn. + alpha (float): the larger it is, the more opaque the segmentations are. + + Returns: + output (VisImage): image object with visualizations. + """ + if isinstance(sem_seg, torch.Tensor): + sem_seg = sem_seg.numpy() + labels, areas = np.unique(sem_seg, return_counts=True) + sorted_idxs = np.argsort(-areas).tolist() + labels = labels[sorted_idxs] + for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] + except (AttributeError, IndexError): + mask_color = None + + binary_mask = (sem_seg == label).astype(np.uint8) + text = self.metadata.stuff_classes[label] + self.draw_binary_mask( + binary_mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + return self.output + + def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): + """ + Draw panoptic prediction annotations or results. + + Args: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each + segment. + segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. + If it is a ``list[dict]``, each dict contains keys "id", "category_id". + If None, category id of each pixel is computed by + ``pixel // metadata.label_divisor``. + area_threshold (int): stuff segments with less than `area_threshold` are not drawn. + + Returns: + output (VisImage): image object with visualizations. 
+ """ + pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) + + # draw mask for all semantic segments first i.e. "stuff" + for mask, sinfo in pred.semantic_masks(): + category_idx = sinfo["category_id"] + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] + except AttributeError: + mask_color = None + + text = self.metadata.stuff_classes[category_idx].replace('-other','').replace('-merged','') + self.draw_binary_mask( + mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + + # draw mask for all instances second + all_instances = list(pred.instance_masks()) + if len(all_instances) == 0: + return self.output + masks, sinfo = list(zip(*all_instances)) + category_ids = [x["category_id"] for x in sinfo] + + try: + scores = [x["score"] for x in sinfo] + except KeyError: + scores = None + class_names = [name.replace('-other','').replace('-merged','') for name in self.metadata.thing_classes] + labels = _create_text_labels( + category_ids, scores, class_names, [x.get("iscrowd", 0) for x in sinfo] + ) + + try: + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids + ] + except AttributeError: + colors = None + self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) + + return self.output + + draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility + + def draw_dataset_dict(self, dic): + """ + Draw annotations/segmentaions in Detectron2 Dataset format. + + Args: + dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. + + Returns: + output (VisImage): image object with visualizations. 
+ """ + annos = dic.get("annotations", None) + if annos: + if "segmentation" in annos[0]: + masks = [x["segmentation"] for x in annos] + else: + masks = None + if "keypoints" in annos[0]: + keypts = [x["keypoints"] for x in annos] + keypts = np.array(keypts).reshape(len(annos), -1, 3) + else: + keypts = None + + boxes = [ + BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) + if len(x["bbox"]) == 4 + else x["bbox"] + for x in annos + ] + + colors = None + category_ids = [x["category_id"] for x in annos] + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) + for c in category_ids + ] + names = self.metadata.get("thing_classes", None) + labels = _create_text_labels( + category_ids, + scores=None, + class_names=names, + is_crowd=[x.get("iscrowd", 0) for x in annos], + ) + self.overlay_instances( + labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors + ) + + sem_seg = dic.get("sem_seg", None) + if sem_seg is None and "sem_seg_file_name" in dic: + with PathManager.open(dic["sem_seg_file_name"], "rb") as f: + sem_seg = Image.open(f) + sem_seg = np.asarray(sem_seg, dtype="uint8") + if sem_seg is not None: + self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.4) + + pan_seg = dic.get("pan_seg", None) + if pan_seg is None and "pan_seg_file_name" in dic: + with PathManager.open(dic["pan_seg_file_name"], "rb") as f: + pan_seg = Image.open(f) + pan_seg = np.asarray(pan_seg) + from panopticapi.utils import rgb2id + + pan_seg = rgb2id(pan_seg) + if pan_seg is not None: + segments_info = dic["segments_info"] + pan_seg = torch.tensor(pan_seg) + self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.7) + return self.output + + def overlay_instances( + self, + *, + boxes=None, + labels=None, + masks=None, + keypoints=None, + assigned_colors=None, + alpha=0.5, + ): + """ + Args: + boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, + or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, + or a :class:`RotatedBoxes`, + or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image, + labels (list[str]): the text to be displayed for each instance. + masks (masks-like object): Supported types are: + + * :class:`detectron2.structures.PolygonMasks`, + :class:`detectron2.structures.BitMasks`. + * list[list[ndarray]]: contains the segmentation masks for all objects in one image. + The first level of the list corresponds to individual instances. The second + level to all the polygon that compose the instance, and the third level + to the polygon coordinates. The third level should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + * list[ndarray]: each ndarray is a binary mask of shape (H, W). + * list[dict]: each dict is a COCO-style RLE. + keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), + where the N is the number of instances and K is the number of keypoints. + The last dimension corresponds to (x, y, visibility or score). + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + Returns: + output (VisImage): image object with visualizations. 
+ """ + num_instances = 0 + if boxes is not None: + boxes = self._convert_boxes(boxes) + num_instances = len(boxes) + if masks is not None: + masks = self._convert_masks(masks) + if num_instances: + assert len(masks) == num_instances + else: + num_instances = len(masks) + if keypoints is not None: + if num_instances: + assert len(keypoints) == num_instances + else: + num_instances = len(keypoints) + keypoints = self._convert_keypoints(keypoints) + if labels is not None: + assert len(labels) == num_instances + if assigned_colors is None: + assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + if num_instances == 0: + return self.output + if boxes is not None and boxes.shape[1] == 5: + return self.overlay_rotated_instances( + boxes=boxes, labels=labels, assigned_colors=assigned_colors + ) + + # Display in largest to smallest order to reduce occlusion. + areas = None + if boxes is not None: + areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) + elif masks is not None: + areas = np.asarray([x.area() for x in masks]) + + if areas is not None: + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] if boxes is not None else None + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None + assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] + keypoints = keypoints[sorted_idxs] if keypoints is not None else None + + for i in range(num_instances): + color = assigned_colors[i] + if boxes is not None: + self.draw_box(boxes[i], edge_color=color) + + if masks is not None: + for segment in masks[i].polygons: + self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) + + if labels is not None: + # first get a box + if boxes is not None: + x0, y0, x1, y1 = boxes[i] + text_pos = (x0, y0) # if drawing boxes, put text on the box corner. + horiz_align = "left" + elif masks is not None: + # skip small mask without polygon + if len(masks[i].polygons) == 0: + continue + + x0, y0, x1, y1 = masks[i].bbox() + + # draw text in the center (defined by median) when box is not drawn + # median is less sensitive to outliers. + text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] + horiz_align = "center" + else: + continue # drawing the box confidence for keypoints isn't very useful. + # for small objects, draw text at the side to avoid occlusion + instance_area = (y1 - y0) * (x1 - x0) + if ( + instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale + or y1 - y0 < 40 * self.output.scale + ): + if y1 >= self.output.height - 5: + text_pos = (x1, y0) + else: + text_pos = (x0, y1) + + height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) + * 0.5 + * self._default_font_size + ) + self.draw_text( + labels[i], + text_pos, + color=lighter_color, + horizontal_alignment=horiz_align, + font_size=font_size, + ) + + # draw keypoints + if keypoints is not None: + for keypoints_per_instance in keypoints: + self.draw_and_connect_keypoints(keypoints_per_instance) + + return self.output + + def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): + """ + Args: + boxes (ndarray): an Nx5 numpy array of + (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image. 
+ labels (list[str]): the text to be displayed for each instance. + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + + Returns: + output (VisImage): image object with visualizations. + """ + num_instances = len(boxes) + + if assigned_colors is None: + assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + if num_instances == 0: + return self.output + + # Display in largest to smallest order to reduce occlusion. + if boxes is not None: + areas = boxes[:, 2] * boxes[:, 3] + + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + colors = [assigned_colors[idx] for idx in sorted_idxs] + + for i in range(num_instances): + self.draw_rotated_box_with_label( + boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None + ) + + return self.output + + def draw_and_connect_keypoints(self, keypoints): + """ + Draws keypoints of an instance and follows the rules for keypoint connections + to draw lines between appropriate keypoints. This follows color heuristics for + line color. + + Args: + keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints + and the last dimension corresponds to (x, y, probability). + + Returns: + output (VisImage): image object with visualizations. + """ + visible = {} + keypoint_names = self.metadata.get("keypoint_names") + for idx, keypoint in enumerate(keypoints): + + # draw keypoint + x, y, prob = keypoint + if prob > self.keypoint_threshold: + self.draw_circle((x, y), color=_RED) + if keypoint_names: + keypoint_name = keypoint_names[idx] + visible[keypoint_name] = (x, y) + + if self.metadata.get("keypoint_connection_rules"): + for kp0, kp1, color in self.metadata.keypoint_connection_rules: + if kp0 in visible and kp1 in visible: + x0, y0 = visible[kp0] + x1, y1 = visible[kp1] + color = tuple(x / 255.0 for x in color) + self.draw_line([x0, x1], [y0, y1], color=color) + + # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip + # Note that this strategy is specific to person keypoints. + # For other keypoints, it should just do nothing + try: + ls_x, ls_y = visible["left_shoulder"] + rs_x, rs_y = visible["right_shoulder"] + mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 + except KeyError: + pass + else: + # draw line from nose to mid-shoulder + nose_x, nose_y = visible.get("nose", (None, None)) + if nose_x is not None: + self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) + + try: + # draw line from mid-shoulder to mid-hip + lh_x, lh_y = visible["left_hip"] + rh_x, rh_y = visible["right_hip"] + except KeyError: + pass + else: + mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 + self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) + return self.output + + """ + Primitive drawing functions: + """ + + def draw_text( + self, + text, + position, + *, + font_size=None, + color="g", + horizontal_alignment="center", + rotation=0, + ): + """ + Args: + text (str): class label + position (tuple): a tuple of the x and y coordinates to place text on image. + font_size (int, optional): font of the text. If not provided, a font size + proportional to the image width is calculated and used. 
+ color: color of the text. Refer to `matplotlib.colors` for full list + of formats that are accepted. + horizontal_alignment (str): see `matplotlib.text.Text` + rotation: rotation angle in degrees CCW + + Returns: + output (VisImage): image object with text drawn. + """ + if not font_size: + font_size = self._default_font_size + + # since the text background is dark, we don't want the text to be dark + color = np.maximum(list(mplc.to_rgb(color)), 0.15) + color[np.argmax(color)] = max(0.8, np.max(color)) + + def contrasting_color(rgb): + """Returns 'white' or 'black' depending on which color contrasts more with the given RGB value.""" + + # Decompose the RGB tuple + R, G, B = rgb + + # Calculate the Y value + Y = 0.299 * R + 0.587 * G + 0.114 * B + + # If Y value is greater than 128, it's closer to white so return black. Otherwise, return white. + return 'black' if Y > 128 else 'white' + + bbox_background = contrasting_color(color*255) + + x, y = position + self.output.ax.text( + x, + y, + text, + size=font_size * self.output.scale, + family="sans-serif", + bbox={"facecolor": bbox_background, "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, + verticalalignment="top", + horizontalalignment=horizontal_alignment, + color=color, + zorder=10, + rotation=rotation, + ) + return self.output + + def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): + """ + Args: + box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 + are the coordinates of the image's top left corner. x1 and y1 are the + coordinates of the image's bottom right corner. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + + Returns: + output (VisImage): image object with box drawn. + """ + x0, y0, x1, y1 = box_coord + width = x1 - x0 + height = y1 - y0 + + linewidth = max(self._default_font_size / 12, 1) + + self.output.ax.add_patch( + mpl.patches.Rectangle( + (x0, y0), + width, + height, + fill=False, + edgecolor=edge_color, + linewidth=linewidth * self.output.scale, + alpha=alpha, + linestyle=line_style, + ) + ) + return self.output + + def draw_rotated_box_with_label( + self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None + ): + """ + Draw a rotated box with label on its top-left corner. + + Args: + rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), + where cnt_x and cnt_y are the center coordinates of the box. + w and h are the width and height of the box. angle represents how + many degrees the box is rotated CCW with regard to the 0-degree box. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + label (string): label for rotated box. It will not be rendered when set to None. + + Returns: + output (VisImage): image object with box drawn. 
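+
+        Example (illustrative only; ``visualizer`` is an assumed existing instance of
+        this class)::
+
+            # a 200 x 100 box centered at (320, 240), rotated 30 degrees CCW
+            visualizer.draw_rotated_box_with_label(
+                (320, 240, 200, 100, 30), edge_color="r", label="rotated"
+            )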
+ """ + cnt_x, cnt_y, w, h, angle = rotated_box + area = w * h + # use thinner lines when the box is small + linewidth = self._default_font_size / ( + 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 + ) + + theta = angle * math.pi / 180.0 + c = math.cos(theta) + s = math.sin(theta) + rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] + # x: left->right ; y: top->down + rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] + for k in range(4): + j = (k + 1) % 4 + self.draw_line( + [rotated_rect[k][0], rotated_rect[j][0]], + [rotated_rect[k][1], rotated_rect[j][1]], + color=edge_color, + linestyle="--" if k == 1 else line_style, + linewidth=linewidth, + ) + + if label is not None: + text_pos = rotated_rect[1] # topleft corner + + height_ratio = h / np.sqrt(self.output.height * self.output.width) + label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size + ) + self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) + + return self.output + + def draw_circle(self, circle_coord, color, radius=3): + """ + Args: + circle_coord (list(int) or tuple(int)): contains the x and y coordinates + of the center of the circle. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + radius (int): radius of the circle. + + Returns: + output (VisImage): image object with box drawn. + """ + x, y = circle_coord + self.output.ax.add_patch( + mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) + ) + return self.output + + def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): + """ + Args: + x_data (list[int]): a list containing x values of all the points being drawn. + Length of list should match the length of y_data. + y_data (list[int]): a list containing y values of all the points being drawn. + Length of list should match the length of x_data. + color: color of the line. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + linestyle: style of the line. Refer to `matplotlib.lines.Line2D` + for a full list of formats that are accepted. + linewidth (float or None): width of the line. When it's None, + a default value will be computed and used. + + Returns: + output (VisImage): image object with line drawn. + """ + if linewidth is None: + linewidth = self._default_font_size / 3 + linewidth = max(linewidth, 1) + self.output.ax.add_line( + mpl.lines.Line2D( + x_data, + y_data, + linewidth=linewidth * self.output.scale, + color=color, + linestyle=linestyle, + ) + ) + return self.output + + def draw_binary_mask( + self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.7, area_threshold=10 + ): + """ + Args: + binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and + W is the image width. Each value in the array is either a 0 or 1 value of uint8 + type. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. + text (str): if None, will be drawn on the object + alpha (float): blending efficient. Smaller values lead to more transparent masks. + area_threshold (float): a connected component smaller than this area will not be shown. 
+
+        Returns:
+            output (VisImage): image object with mask drawn.
+        """
+        if color is None:
+            color = random_color(rgb=True, maximum=1)
+        color = mplc.to_rgb(color)
+
+        has_valid_segment = False
+        binary_mask = binary_mask.astype("uint8")  # opencv needs uint8
+        mask = GenericMask(binary_mask, self.output.height, self.output.width)
+        shape2d = (binary_mask.shape[0], binary_mask.shape[1])
+
+        if not mask.has_holes:
+            # draw polygons for regular masks
+            for segment in mask.polygons:
+                area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
+                if area < (area_threshold or 0):
+                    continue
+                has_valid_segment = True
+                segment = segment.reshape(-1, 2)
+                self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
+        else:
+            # TODO: Use Path/PathPatch to draw vector graphics:
+            # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
+            rgba = np.zeros(shape2d + (4,), dtype="float32")
+            rgba[:, :, :3] = color
+            rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
+            has_valid_segment = True
+            self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
+
+        if text is not None and has_valid_segment:
+            lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
+            self._draw_text_in_mask(binary_mask, text, lighter_color)
+        return self.output
+
+    def draw_binary_mask_with_number(
+        self, binary_mask, color=None, *, edge_color=None, text=None, label_mode='1', alpha=0.1, anno_mode=['Mask'], area_threshold=10
+    ):
+        """
+        Args:
+            binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
+                W is the image width. Each value in the array is either a 0 or 1 value of uint8
+                type.
+            color: color of the mask. Refer to `matplotlib.colors` for a full list of
+                formats that are accepted. If None, will pick a random color.
+            edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
+                full list of formats that are accepted.
+            text (str): if not None, this text will be drawn on the object.
+            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
+            area_threshold (float): a connected component smaller than this area will not be shown.
+
+        Returns:
+            output (VisImage): image object with mask drawn.
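+
+        Example (an illustrative sketch; ``visualizer`` and ``mask`` are assumed to
+        exist, with ``mask`` a binary (H, W) uint8 array containing one region)::
+
+            # draw the region's mask, its bounding box, and the numeric mark "3"
+            visualizer.draw_binary_mask_with_number(
+                mask,
+                text="3",
+                label_mode='1',                  # '1' keeps digits; 'a' maps them to letters
+                anno_mode=['Mask', 'Box', 'Mark'],
+                alpha=0.1,
+            )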
+ """ + if color is None: + randint = random.randint(0, len(self.color_proposals)-1) + color = self.color_proposals[randint] + color = mplc.to_rgb(color) + + has_valid_segment = True + binary_mask = binary_mask.astype("uint8") # opencv needs uint8 + mask = GenericMask(binary_mask, self.output.height, self.output.width) + shape2d = (binary_mask.shape[0], binary_mask.shape[1]) + bbox = mask.bbox() + + if 'Mask' in anno_mode: + if not mask.has_holes: + # draw polygons for regular masks + for segment in mask.polygons: + area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) + if area < (area_threshold or 0): + continue + has_valid_segment = True + segment = segment.reshape(-1, 2) + self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) + else: + # TODO: Use Path/PathPatch to draw vector graphics: + # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha + has_valid_segment = True + self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) + + if 'Box' in anno_mode: + self.draw_box(bbox, edge_color=color, alpha=0.75) + + if 'Mark' in anno_mode: + has_valid_segment = True + else: + has_valid_segment = False + + if text is not None and has_valid_segment: + # lighter_color = tuple([x*0.2 for x in color]) + lighter_color = [1,1,1] # self._change_color_brightness(color, brightness_factor=0.7) + self._draw_number_in_mask(binary_mask, text, lighter_color, label_mode) + return self.output + + def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): + """ + Args: + soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. + text (str): if None, will be drawn on the object + alpha (float): blending efficient. Smaller values lead to more transparent masks. + + Returns: + output (VisImage): image object with mask drawn. + """ + if color is None: + color = random_color(rgb=True, maximum=1) + color = mplc.to_rgb(color) + + shape2d = (soft_mask.shape[0], soft_mask.shape[1]) + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = soft_mask * alpha + self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) + + if text is not None: + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + binary_mask = (soft_mask > 0.5).astype("uint8") + self._draw_text_in_mask(binary_mask, text, lighter_color) + return self.output + + def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): + """ + Args: + segment: numpy array of shape Nx2, containing all the points in the polygon. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. If not provided, a darker shade + of the polygon color will be used instead. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + + Returns: + output (VisImage): image object with polygon drawn. 
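+
+        Example (illustrative only; ``visualizer`` is an assumed existing instance of
+        this class)::
+
+            import numpy as np
+
+            triangle = np.array([[10, 10], [60, 10], [35, 50]])   # Nx2 (x, y) vertices
+            visualizer.draw_polygon(triangle, color="b", alpha=0.5)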
+ """ + if edge_color is None: + # make edge color darker than the polygon color + if alpha > 0.8: + edge_color = self._change_color_brightness(color, brightness_factor=-0.7) + else: + edge_color = color + edge_color = mplc.to_rgb(edge_color) + (1,) + + polygon = mpl.patches.Polygon( + segment, + fill=True, + facecolor=mplc.to_rgb(color) + (alpha,), + edgecolor=edge_color, + linewidth=max(self._default_font_size // 15 * self.output.scale, 1), + ) + self.output.ax.add_patch(polygon) + return self.output + + """ + Internal methods: + """ + + def _jitter(self, color): + """ + Randomly modifies given color to produce a slightly different color than the color given. + + Args: + color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color + picked. The values in the list are in the [0.0, 1.0] range. + + Returns: + jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the + color after being jittered. The values in the list are in the [0.0, 1.0] range. + """ + color = mplc.to_rgb(color) + # np.random.seed(0) + vec = np.random.rand(3) + # better to do it in another color space + vec = vec / np.linalg.norm(vec) * 0.5 + res = np.clip(vec + color, 0, 1) + return tuple(res) + + def _create_grayscale_image(self, mask=None): + """ + Create a grayscale version of the original image. + The colors in masked area, if given, will be kept. + """ + img_bw = self.img.astype("f4").mean(axis=2) + img_bw = np.stack([img_bw] * 3, axis=2) + if mask is not None: + img_bw[mask] = self.img[mask] + return img_bw + + def _change_color_brightness(self, color, brightness_factor): + """ + Depending on the brightness_factor, gives a lighter or darker color i.e. a color with + less or more saturation than the original color. + + Args: + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of + 0 will correspond to no change, a factor in [-1.0, 0) range will result in + a darker color and a factor in (0, 1.0] range will result in a lighter color. + + Returns: + modified_color (tuple[double]): a tuple containing the RGB values of the + modified color. Each value in the tuple is in the [0.0, 1.0] range. + """ + assert brightness_factor >= -1.0 and brightness_factor <= 1.0 + color = mplc.to_rgb(color) + polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) + modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) + modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness + modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness + modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) + return modified_color + + def _convert_boxes(self, boxes): + """ + Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. + """ + if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): + return boxes.tensor.detach().numpy() + else: + return np.asarray(boxes) + + def _convert_masks(self, masks_or_polygons): + """ + Convert different format of masks or polygons to a tuple of masks and polygons. 
+ + Returns: + list[GenericMask]: + """ + + m = masks_or_polygons + if isinstance(m, PolygonMasks): + m = m.polygons + if isinstance(m, BitMasks): + m = m.tensor.numpy() + if isinstance(m, torch.Tensor): + m = m.numpy() + ret = [] + for x in m: + if isinstance(x, GenericMask): + ret.append(x) + else: + ret.append(GenericMask(x, self.output.height, self.output.width)) + return ret + + def _draw_number_in_mask(self, binary_mask, text, color, label_mode='1'): + """ + Find proper places to draw text given a binary mask. + """ + + def number_to_string(n): + chars = [] + while n: + n, remainder = divmod(n-1, 26) + chars.append(chr(97 + remainder)) + return ''.join(reversed(chars)) + + binary_mask = np.pad(binary_mask, ((1, 1), (1, 1)), 'constant') + mask_dt = cv2.distanceTransform(binary_mask, cv2.DIST_L2, 0) + mask_dt = mask_dt[1:-1, 1:-1] + max_dist = np.max(mask_dt) + coords_y, coords_x = np.where(mask_dt == max_dist) # coords is [y, x] + + if label_mode == 'a': + text = number_to_string(int(text)) + else: + text = text + + self.draw_text(text, (coords_x[len(coords_x)//2] + 2, coords_y[len(coords_y)//2] - 6), color=color) + + # TODO sometimes drawn on wrong objects. the heuristics here can improve. + # _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) + # if stats[1:, -1].size == 0: + # return + # largest_component_id = np.argmax(stats[1:, -1]) + 1 + + # # draw text on the largest component, as well as other very large components. + # for cid in range(1, _num_cc): + # if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: + # # median is more stable than centroid + # # center = centroids[largest_component_id] + # center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] + # # bottom=np.max((cc_labels == cid).nonzero(), axis=1)[::-1] + # # center[1]=bottom[1]+2 + # self.draw_text(text, center, color=color) + + def _draw_text_in_mask(self, binary_mask, text, color): + """ + Find proper places to draw text given a binary mask. + """ + # TODO sometimes drawn on wrong objects. the heuristics here can improve. + _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) + if stats[1:, -1].size == 0: + return + largest_component_id = np.argmax(stats[1:, -1]) + 1 + + # draw text on the largest component, as well as other very large components. + for cid in range(1, _num_cc): + if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: + # median is more stable than centroid + # center = centroids[largest_component_id] + center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] + bottom=np.max((cc_labels == cid).nonzero(), axis=1)[::-1] + center[1]=bottom[1]+2 + self.draw_text(text, center, color=color) + + def _convert_keypoints(self, keypoints): + if isinstance(keypoints, Keypoints): + keypoints = keypoints.tensor + keypoints = np.asarray(keypoints) + return keypoints + + def get_output(self): + """ + Returns: + output (VisImage): the image output containing the visualizations added + to the image. + """ + return self.output \ No newline at end of file