Merge pull request #1062 from mikel-brostrom/optim-reid-preproc
Optim reid preproc
mikel-brostrom authored Aug 9, 2023
2 parents 057435d + 506b2e9 commit ca4531d
Showing 7 changed files with 54 additions and 81 deletions.
1 change: 1 addition & 0 deletions boxmot/appearance/backbones/lmbn/__init__.py
@@ -0,0 +1 @@
+# Mikel Broström 🔥 Yolo Tracking 🧾 AGPL-3.0 license
76 changes: 45 additions & 31 deletions boxmot/appearance/reid_multibackend.py
@@ -4,11 +4,11 @@
 from os.path import exists as file_exists
 from pathlib import Path

+import cv2
 import gdown
 import numpy as np
 import torch
 import torch.nn as nn
-import torchvision.transforms as T

 from boxmot.appearance.backbones import build_model
 from boxmot.appearance.reid_model_factory import (get_model_name,
@@ -53,19 +53,7 @@ def __init__(
         ) = self.model_type(w)  # get backend
         self.fp16 = fp16
         self.fp16 &= self.pt or self.jit or self.engine  # FP16
-
-        # Build transform functions
-        self.device = device
-        self.image_size = (256, 128)
-        self.pixel_mean = [0.485, 0.456, 0.406]
-        self.pixel_std = [0.229, 0.224, 0.225]
-        self.transforms = []
-        self.transforms += [T.Resize(self.image_size)]
-        self.transforms += [T.ToTensor()]
-        self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)]
-        self.preprocess = T.Compose(self.transforms)
-        self.to_pil = T.ToPILImage()

         self.nhwc = self.tflite  # activate bhwc --> bcwh

         model_name = get_model_name(w)
@@ -197,21 +185,39 @@ def model_type(p="path/to/model.pt"):
         types = [s in Path(p).name for s in sf]
         return types

-    def _preprocess(self, im_batch):
-        images = []
-        for element in im_batch:
-            image = self.to_pil(element)
-            image = self.preprocess(image)
-            images.append(image)
-
-        images = torch.stack(images, dim=0)
-        images = images.to(self.device)
-
-        return images
+    def preprocess(self, xyxys, img):
+        crops = []
+        # dets are of different sizes so batch preprocessing is not possible
+        for box in xyxys:
+            x1, y1, x2, y2 = box.astype('int')
+            crop = img[y1:y2, x1:x2]
+            # resize
+            crop = cv2.resize(
+                crop,
+                (128, 256),  # from (x, y) to (128, 256) | (w, h)
+                interpolation=cv2.INTER_LINEAR,
+            )
+
+            # (cv2) BGR 2 (PIL) RGB. The ReID models have been trained with this channel order
+            crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
+
+            # normalization
+            crop = crop / 255
+
+            # standardization (RGB channel order)
+            crop = crop - np.array([0.485, 0.456, 0.406])
+            crop = crop / np.array([0.229, 0.224, 0.225])
+
+            crop = torch.from_numpy(crop).float()
+            crops.append(crop)
+
+        crops = torch.stack(crops, dim=0)
+        crops = torch.permute(crops, (0, 3, 1, 2))
+        crops = crops.to(dtype=torch.half if self.fp16 else torch.float, device=self.device)
+
+        return crops
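The new cv2/numpy path is meant to reproduce the removed torchvision pipeline (Resize, ToTensor, Normalize with the same ImageNet statistics). Below is a standalone sketch, not part of the diff, comparing the two on a synthetic crop; the crop and the printed check are illustrative, and a small residual difference remains because PIL and cv2 implement bilinear resizing differently.

import cv2
import numpy as np
import torch
import torchvision.transforms as T

# hypothetical 100x50 RGB crop standing in for a detection
rgb = np.random.randint(0, 255, (100, 50, 3), dtype=np.uint8)

# old path: PIL-based transforms, as removed in this commit
old = T.Compose([
    T.ToPILImage(),
    T.Resize((256, 128)),  # (h, w)
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])(rgb)

# new path: pure cv2/numpy, as added in this commit
new = cv2.resize(rgb, (128, 256), interpolation=cv2.INTER_LINEAR)  # (w, h)
new = (new / 255 - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
new = torch.from_numpy(new).float().permute(2, 0, 1)  # HWC -> CHW

# the residual difference is dominated by the two resize implementations
print((old - new).abs().max())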

     def forward(self, im_batch):
-        # preprocess batch
-        im_batch = self._preprocess(im_batch)
-
         # batch to half
         if self.fp16 and im_batch.dtype != torch.float16:
@@ -273,18 +279,26 @@ def forward(self, im_batch):

         if isinstance(features, (list, tuple)):
             return (
-                self.from_numpy(features[0])
-                if len(features) == 1
-                else [self.from_numpy(x) for x in features]
+                self.to_numpy(features[0]) if len(features) == 1 else [self.to_numpy(x) for x in features]
             )
         else:
-            return self.from_numpy(features)
+            return self.to_numpy(features)

-    def from_numpy(self, x):
-        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
+    def to_numpy(self, x):
+        return x.cpu().numpy() if isinstance(x, torch.Tensor) else x

     def warmup(self, imgsz=[(256, 128, 3)]):
         # warmup model by running inference once
         if self.device.type != "cpu":
-            im = [np.empty(*imgsz).astype(np.uint8)]  # input
+            im = np.random.randint(0, 255, *imgsz, dtype=np.uint8)
+            im = self.preprocess(xyxys=np.array([[0, 0, 128, 256]]), img=im)
             self.forward(im)  # warmup

+    @torch.no_grad()
+    def get_features(self, xyxys, img):
+        if xyxys.size != 0:
+            crops = self.preprocess(xyxys, img)
+            features = self.forward(crops)
+        else:
+            features = np.array([])
+        return features
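Taken together, preprocess, forward, and get_features give every tracker a single numpy-in, numpy-out entry point. A minimal usage sketch follows (not part of the diff); the weights file, device, frame path, and box values are assumptions for illustration.

from pathlib import Path

import cv2
import numpy as np
import torch

from boxmot.appearance.reid_multibackend import ReIDDetectMultiBackend

# hypothetical weights and frame; any exported ReID model the backend
# supports would do
model = ReIDDetectMultiBackend(
    weights=Path('osnet_x0_25_msmt17.pt'), device=torch.device('cpu'), fp16=False
)
img = cv2.imread('frame.jpg')            # BGR frame, as OpenCV reads it
xyxys = np.array([[10, 20, 110, 220],    # two detection boxes in pixels
                  [50, 60, 150, 260]])

feats = model.get_features(xyxys, img)   # numpy array, one embedding per box
print(feats.shape)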
19 changes: 3 additions & 16 deletions boxmot/trackers/botsort/bot_sort.py
@@ -3,7 +3,6 @@
 from collections import deque

 import numpy as np
-import torch

 from boxmot.appearance.reid_multibackend import ReIDDetectMultiBackend
 from boxmot.motion.cmc.sof import SparseOptFlow
@@ -325,13 +324,14 @@ def update(self, dets, img):
         self.height, self.width = img.shape[:2]

         """Extract embeddings """
-        features_keep = self._get_features(dets, img)
+        features_keep = self.model.get_features(xyxys[remain_inds], img)

         if len(dets) > 0:
             """Detections"""

             detections = [
-                STrack(xyxy, s, c, f.cpu().numpy())
+                STrack(xyxy, s, c, f)
                 for (xyxy, s, c, f) in zip(
                     dets, scores_keep, classes_keep, features_keep
                 )
@@ -505,19 +505,6 @@ def _xywh_to_xyxy(self, bbox_xywh):
         y2 = min(int(y + h / 2), self.height - 1)
         return x1, y1, x2, y2

-    @torch.no_grad()
-    def _get_features(self, bbox_xywh, ori_img):
-        im_crops = []
-        for box in bbox_xywh:
-            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
-            im = ori_img[y1:y2, x1:x2]
-            im_crops.append(im)
-        if im_crops:
-            features = self.model(im_crops)
-        else:
-            features = np.array([])
-        return features


 def joint_stracks(tlista, tlistb):
     exists = {}
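For reference, the _xywh_to_xyxy helper whose tail appears above maps a center-based (x, y, w, h) box to corner coordinates clamped to the image. A standalone sketch, not part of the diff (the x1/x2 lines are reconstructed by symmetry with the y1/y2 lines visible in the hunks):

def xywh_to_xyxy(bbox_xywh, width, height):
    # center (x, y) plus (w, h) -> corners clamped to the image bounds
    x, y, w, h = bbox_xywh
    x1 = max(int(x - w / 2), 0)
    y1 = max(int(y - h / 2), 0)
    x2 = min(int(x + w / 2), width - 1)
    y2 = min(int(y + h / 2), height - 1)
    return x1, y1, x2, y2

print(xywh_to_xyxy((15, 10, 40, 40), width=640, height=480))  # (0, 0, 35, 30)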
19 changes: 2 additions & 17 deletions boxmot/trackers/deepocsort/deep_ocsort.py
@@ -5,7 +5,6 @@
"""

import numpy as np
import torch

from boxmot.appearance.reid_multibackend import ReIDDetectMultiBackend
from boxmot.motion.cmc import get_cmc_method
@@ -240,7 +239,7 @@ def update_emb(self, emb, alpha=0.9):
         self.emb /= np.linalg.norm(self.emb)

     def get_emb(self):
-        return self.emb.cpu()
+        return self.emb

     def apply_affine_correction(self, affine):
         m = affine[:, :2]
@@ -379,7 +378,7 @@ def update(self, dets, img):
         else:
             # (Ndets x X) [512, 1024, 2048]
             # dets_embs = self.embedder.compute_embedding(img, dets[:, :4], tag)
-            dets_embs = self._get_features(dets[:, :4], img)
+            dets_embs = self.model.get_features(dets[:, :4], img)

         # CMC
         if not self.cmc_off:
@@ -519,17 +518,3 @@ def _xywh_to_xyxy(self, bbox_xywh):
         y1 = max(int(y - h / 2), 0)
         y2 = min(int(y + h / 2), self.height - 1)
         return x1, y1, x2, y2
-
-    @torch.no_grad()
-    def _get_features(self, bbox_xyxy, ori_img):
-        im_crops = []
-        for box in bbox_xyxy:
-            x1, y1, x2, y2 = box.astype(int)
-            im = ori_img[y1:y2, x1:x2]
-            im_crops.append(im)
-        if im_crops:
-            features = self.model(im_crops).cpu()
-        else:
-            features = np.array([])
-
-        return features
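Since features now arrive as numpy arrays, the track's running embedding stays numpy end to end, which is why get_emb drops its .cpu() call. A rough sketch of update_emb, not part of the diff (the EMA line is an assumption inferred from the signature; only the re-normalization appears in the hunk above):

import numpy as np

def update_emb(emb, new_emb, alpha=0.9):
    # blend the stored embedding with the new one, then re-normalize
    # (assumed EMA form; the norm step matches the diff above)
    emb = alpha * emb + (1 - alpha) * new_emb
    emb /= np.linalg.norm(emb)
    return emb

e = update_emb(np.array([1.0, 0.0]), np.array([0.0, 1.0]))
print(e, np.linalg.norm(e))  # unit-norm blend of old and new embeddings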
2 changes: 1 addition & 1 deletion boxmot/trackers/strongsort/sort/detection.py
@@ -30,7 +30,7 @@ class Detection(object):
     def __init__(self, tlwh, confidence, feature):
         self.tlwh = np.asarray(tlwh, dtype=np.float32)
         self.confidence = float(confidence)
-        self.feature = np.asarray(feature.cpu(), dtype=np.float32)
+        self.feature = feature

     def to_xyah(self):
         """Convert bounding box to format `(center x, center y, aspect ratio,
16 changes: 1 addition & 15 deletions boxmot/trackers/strongsort/strong_sort.py
@@ -1,7 +1,6 @@
 # Mikel Broström 🔥 Yolo Tracking 🧾 AGPL-3.0 license

 import numpy as np
-import torch

 from boxmot.appearance.reid_multibackend import ReIDDetectMultiBackend
 from boxmot.motion.cmc import get_cmc_method
@@ -64,7 +63,7 @@ def update(self, dets, img):
             track.camera_update(warp_matrix)

         # extract appearance information for each detection
-        features = self._get_features(xyxy, img)
+        features = self.model.get_features(xyxy, img)

         tlwh = xyxy2tlwh(xyxy)
         detections = [
@@ -92,16 +91,3 @@ def update(self, dets, img):
         )
         outputs = np.asarray(outputs)
         return outputs
-
-    @torch.no_grad()
-    def _get_features(self, xyxys, img):
-        im_crops = []
-        for box in xyxys:
-            x1, y1, x2, y2 = box.astype('int')
-            im = img[y1:y2, x1:x2]
-            im_crops.append(im)
-        if im_crops:
-            features = self.model(im_crops)
-        else:
-            features = np.array([])
-        return features
2 changes: 1 addition & 1 deletion boxmot/utils/association.py
@@ -156,7 +156,7 @@ def associate(
     if emb_cost is None:
         emb_cost = 0
     else:
-        emb_cost = emb_cost.numpy()
+        emb_cost = emb_cost
         emb_cost[iou_matrix <= 0] = 0
         if not aw_off:
             emb_cost = compute_aw_max_metric(emb_cost, w_assoc_emb, bottom=aw_param)
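With get_features returning numpy, emb_cost reaches associate as a plain array, so the .numpy() conversion becomes unnecessary. A toy illustration of the gating step above, not part of the diff, with made-up 2x2 matrices:

import numpy as np

emb_cost = np.array([[0.9, 0.2],
                     [0.1, 0.8]])    # appearance similarity, tracks x dets
iou_matrix = np.array([[0.5, 0.0],
                       [0.0, 0.6]])  # spatial overlap for the same pairs

# suppress appearance evidence where boxes do not overlap at all,
# exactly as associate() does after this change
emb_cost[iou_matrix <= 0] = 0
print(emb_cost)  # [[0.9 0. ], [0.  0.8]]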
