Unverified Commit a5ee9b41 authored by lizz's avatar lizz Committed by GitHub
Browse files

Fix some typos (#4858)



* Fix some typos
Signed-off-by: default avatarlizz <lizz@sensetime.com>

* More
Signed-off-by: default avatarlizz <lizz@sensetime.com>

* More
Signed-off-by: default avatarlizz <lizz@sensetime.com>

* More
Signed-off-by: default avatarlizz <lizz@sensetime.com>
parent ecb4a07c
......@@ -393,7 +393,7 @@ Function `get_subset_by_classes` in dataset is refactored and only filters out i
- Implement FCOS training tricks (#2935)
- Use BaseDenseHead as base class for anchor-base heads (#2963)
- Add `with_cp` for BasicBlock (#2891)
- Add `stem_channles` argument for ResNet (#2954)
- Add `stem_channels` argument for ResNet (#2954)
**Improvements**
......
......@@ -290,7 +290,7 @@ test_pipeline = [
flip=False, # Whether to flip images during testing
transforms=[
dict(type='Resize', # Use resize augmentation
keep_ratio=True), # Whether to keep the ratio between height and width, the img_scale set here will be supressed by the img_scale set above.
keep_ratio=True), # Whether to keep the ratio between height and width, the img_scale set here will be suppressed by the img_scale set above.
dict(type='RandomFlip'), # Though RandomFlip is added in pipeline, it is not used because flip=False
dict(
type='Normalize', # Normalization config, the values are from img_norm_cfg
......
......@@ -48,7 +48,7 @@ class ATSSAssigner(BaseAssigner):
4. get corresponding iou for these candidates, and compute the
mean and std, set mean + std as the iou threshold
5. select these candidates whose iou are greater than or equal to
the threshold as postive
the threshold as positive
6. limit the positive sample's center in gt
......
......@@ -6,5 +6,4 @@ class BaseAssigner(metaclass=ABCMeta):
@abstractmethod
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign boxes to either a ground truth boxe or a negative boxes."""
pass
"""Assign boxes to either a ground truth boxes or a negative boxes."""
......@@ -18,7 +18,7 @@ class HungarianAssigner(BaseAssigner):
This class computes an assignment between the targets and the predictions
based on the costs. The costs are weighted sum of three components:
classfication cost, regression L1 cost and regression iou cost. The
classification cost, regression L1 cost and regression iou cost. The
targets don't include the no_object, so generally there are more
predictions than targets. After the one-to-one matching, the un-matched
are treated as backgrounds. Thus each query prediction will be assigned
......
......@@ -182,7 +182,7 @@ class MaxIoUAssigner(BaseAssigner):
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
if self.match_low_quality:
# Low-quality matching will overwirte the assigned_gt_inds assigned
# Low-quality matching will overwrite the assigned_gt_inds assigned
# in Step 3. Thus, the assigned gt might not be the best one for
# prediction.
# For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
......
......@@ -10,10 +10,8 @@ class BaseBBoxCoder(metaclass=ABCMeta):
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
pass
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
pass
......@@ -10,7 +10,7 @@ from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class BucketingBBoxCoder(BaseBBoxCoder):
"""Bucketing BBox Coder for Side-Aware Bounday Localization (SABL).
"""Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).
Boundary Localization with Bucketing and Bucketing Guided Rescoring
are implemented here.
......@@ -255,7 +255,7 @@ def bbox2bucket(proposals,
bucket_cls_d_weights
],
dim=-1)
# ignore second nearest buckets for cls if necessay
# ignore second nearest buckets for cls if necessary
if cls_ignore_neighbor:
bucket_cls_weights = (~((bucket_cls_weights == 1) &
(bucket_labels == 0))).float()
......
......@@ -88,7 +88,7 @@ def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
# Either the boxes are empty or the length of boxes's last dimenstion is 4
# Either the boxes are empty or the length of boxes' last dimension is 4
assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
......
......@@ -136,7 +136,7 @@ class ClassificationCost(object):
# Following the official DETR repo, contrary to the loss that
# NLL is used, we approximate it in 1 - cls_score[gt_label].
# The 1 is a constant that doesn't change the matching,
# so it can be ommitted.
# so it can be omitted.
cls_score = cls_pred.softmax(-1)
cls_cost = -cls_score[:, gt_labels]
return cls_cost * self.weight
......@@ -179,6 +179,6 @@ class IoUCost(object):
# overlaps: [num_bboxes, num_gt]
overlaps = bbox_overlaps(
bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
# The 1 is a constant that doesn't change the matching, so ommitted.
# The 1 is a constant that doesn't change the matching, so omitted.
iou_cost = -overlaps
return iou_cost * self.weight
......@@ -94,7 +94,7 @@ dataset_aliases = {
'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
'coco': ['coco', 'mscoco', 'ms_coco'],
'wider_face': ['WIDERFaceDataset', 'wider_face', 'WDIERFace'],
'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
'cityscapes': ['cityscapes']
}
......
......@@ -124,7 +124,7 @@ def tpfp_imagenet(det_bboxes,
# find best overlapped available gt
for j in range(num_gts):
# different from PASCAL VOC: allow finding other gts if the
# best overlaped ones are already matched by other det bboxes
# best overlapped ones are already matched by other det bboxes
if gt_covered[j]:
continue
elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
......
......@@ -23,7 +23,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
BaseInstanceMasks: The rescaled masks.
"""
pass
@abstractmethod
def resize(self, out_shape, interpolation='nearest'):
......@@ -36,7 +35,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
BaseInstanceMasks: The resized masks.
"""
pass
@abstractmethod
def flip(self, flip_direction='horizontal'):
......@@ -48,7 +46,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
BaseInstanceMasks: The flipped masks.
"""
pass
@abstractmethod
def pad(self, out_shape, pad_val):
......@@ -61,7 +58,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
BaseInstanceMasks: The padded masks.
"""
pass
@abstractmethod
def crop(self, bbox):
......@@ -73,7 +69,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Return:
BaseInstanceMasks: The cropped masks.
"""
pass
@abstractmethod
def crop_and_resize(self,
......@@ -99,18 +94,15 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Return:
BaseInstanceMasks: the cropped and resized masks.
"""
pass
@abstractmethod
def expand(self, expanded_h, expanded_w, top, left):
"""see :class:`Expand`."""
pass
@property
@abstractmethod
def areas(self):
"""ndarray: areas of each instance."""
pass
@abstractmethod
def to_ndarray(self):
......@@ -119,7 +111,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Return:
ndarray: Converted masks in the format of ndarray.
"""
pass
@abstractmethod
def to_tensor(self, dtype, device):
......@@ -132,7 +123,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
Tensor: Converted masks in the format of Tensor.
"""
pass
@abstractmethod
def translate(self,
......@@ -154,7 +144,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
Translated masks.
"""
pass
def shear(self,
out_shape,
......@@ -176,7 +165,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
ndarray: Sheared masks.
"""
pass
@abstractmethod
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
......@@ -195,7 +183,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
Rotated masks.
"""
pass
class BitmapMasks(BaseInstanceMasks):
......
......@@ -262,7 +262,6 @@ class CustomDataset(Dataset):
def format_results(self, results, **kwargs):
"""Place holder to format result to dataset specific output."""
pass
def evaluate(self,
results,
......
......@@ -634,7 +634,7 @@ class Translate(object):
min_y = np.maximum(0, min_y + offset)
max_y = np.minimum(h, max_y + offset)
# the boxs translated outside of image will be filtered along with
# the boxes translated outside of image will be filtered along with
# the corresponding masks, by invoking ``_filter_invalid``.
results[key] = np.concatenate([min_x, min_y, max_x, max_y],
axis=-1)
......
......@@ -120,7 +120,7 @@ class Resize(object):
Args:
img_scales (list[tuple]): Images scale range for sampling.
There must be two tuples in img_scales, which specify the lower
and uper bound of image scales.
and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where \
......
......@@ -149,7 +149,7 @@ class ResLayer(nn.Sequential):
rfp_inplanes=None,
**kwargs):
self.block = block
assert downsample_first, f'downsampel_first={downsample_first} is ' \
assert downsample_first, f'downsample_first={downsample_first} is ' \
'not supported in DetectoRS'
downsample = None
......
......@@ -527,7 +527,7 @@ class HRNet(nn.Module):
return y_list
def train(self, mode=True):
"""Convert the model into training mode whill keeping the normalization
"""Convert the model into training mode will keeping the normalization
layer freezed."""
super(HRNet, self).train(mode)
if mode and self.norm_eval:
......
......@@ -21,7 +21,7 @@ class RegNet(ResNet):
- wm (float): quantization parameter to quantize the width
- depth (int): depth of the backbone
- group_w (int): width of group
- bot_mul (float): bottleneck ratio, i.e. expansion of bottlneck.
- bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
strides (Sequence[int]): Strides of the first block of each stage.
base_channels (int): Base channels after stem layer.
in_channels (int): Number of input image channels. Default: 3.
......@@ -252,7 +252,7 @@ class RegNet(ResNet):
@staticmethod
def quantize_float(number, divisor):
"""Converts a float to closest non-zero int divisible by divior.
"""Converts a float to closest non-zero int divisible by divisor.
Args:
number (int): Original number to be quantized.
......
......@@ -197,7 +197,7 @@ class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
return multi_apply(self.forward_single, feats)[:2]
def forward_single(self, x):
"""Forward features of a single scale levle.
"""Forward features of a single scale level.
Args:
x (Tensor): FPN feature maps of the specified stride.
......@@ -274,7 +274,7 @@ class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
@abstractmethod
def get_targets(self, points, gt_bboxes_list, gt_labels_list):
"""Compute regression, classification and centerss targets for points
"""Compute regression, classification and centerness targets for points
in multiple images.
Args:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment