homura.vision.transforms package

Submodules

homura.vision.transforms.mixup module

homura.vision.transforms.mixup.mixup(input, target, gamma, indices=None)[source]

mixup: Beyond Empirical Risk Minimization

Parameters
  • input (torch.Tensor) –

  • target (torch.Tensor) –

  • gamma (float) –

  • indices (Optional[torch.Tensor]) –

Returns

Return type

Tuple[torch.Tensor, torch.Tensor]

homura.vision.transforms.mixup.partial_mixup(input, gamma, indices)[source]

mixup: Beyond Empirical Risk Minimization

Parameters
  • input (torch.Tensor) –

  • gamma (float) –

  • indices (torch.Tensor) –

Returns

Return type

torch.Tensor

homura.vision.transforms.transform module

class homura.vision.transforms.transform.CenterCrop(size, target_type=None)[source]

Bases: homura.vision.transforms.transform.GeometricTransformBase

apply_coords(coords, original_wh, params)[source]
Parameters

coords (torch.Tensor) –

Return type

torch.Tensor

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

class homura.vision.transforms.transform.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0, target_type=None)[source]

Bases: homura.vision.transforms.transform.NonGeometricTransformBase

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

class homura.vision.transforms.transform.ConcatTransform(*transforms, target_type=None)[source]

Bases: homura.vision.transforms.transform.TransformBase

apply_bbox(bbox, params, original_wh)[source]
Parameters
  • bbox (torch.Tensor) –

  • original_wh (Tuple[int, int]) –

Return type

torch.Tensor

apply_coords(coords, original_wh, params)[source]
Parameters
  • coords (torch.Tensor) –

  • original_wh (torch.Tensor) –

Return type

torch.Tensor

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

apply_mask(mask, params)[source]
Parameters

mask (torch.Tensor) –

Return type

torch.Tensor

class homura.vision.transforms.transform.GeometricTransformBase(target_type)[source]

Bases: homura.vision.transforms.transform.TransformBase, abc.ABC

apply_mask(mask, params)[source]
Parameters

mask (torch.Tensor) –

Return type

torch.Tensor

class homura.vision.transforms.transform.NonGeometricTransformBase(target_type)[source]

Bases: homura.vision.transforms.transform.TransformBase, abc.ABC

apply_bbox(bbox, params, original_wh)[source]
Parameters
  • bbox (torch.Tensor) –

  • original_wh (Tuple[int, int]) –

Return type

torch.Tensor

apply_coords(coords, original_wh, params)[source]
Parameters
  • coords (torch.Tensor) –

  • original_wh (torch.Tensor) –

Return type

torch.Tensor

apply_mask(mask, params)[source]
Parameters

mask (torch.Tensor) –

Return type

torch.Tensor

class homura.vision.transforms.transform.Normalize(mean, std, target_type=None)[source]

Bases: homura.vision.transforms.transform.NonGeometricTransformBase

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

class homura.vision.transforms.transform.RandomCrop(size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant', mask_fill=255, target_type=None)[source]

Bases: homura.vision.transforms.transform.GeometricTransformBase

apply_coords(coords, original_wh, params)[source]
Parameters

coords (torch.Tensor) –

Return type

torch.Tensor

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

get_params(image)[source]
Return type

Tuple[int, ...]

class homura.vision.transforms.transform.RandomGrayScale(p=0.5, target_type=None)[source]

Bases: homura.vision.transforms.transform.NonGeometricTransformBase

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

class homura.vision.transforms.transform.RandomHorizontalFlip(p=0.5, target_type=None)[source]

Bases: homura.vision.transforms.transform.GeometricTransformBase

apply_coords(coords, original_wh, params)[source]
Parameters

coords (torch.Tensor) –

Return type

torch.Tensor

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

get_params(image)[source]
Return type

Optional

class homura.vision.transforms.transform.RandomResize(min_size, max_size=None, target_type=None)[source]

Bases: homura.vision.transforms.transform.GeometricTransformBase

apply_coords(coords, original_wh, params)[source]
Parameters
  • coords (torch.Tensor) –

  • original_wh (Tuple[int, int]) –

Return type

torch.Tensor

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

apply_mask(mask, params)[source]
Parameters

mask (torch.Tensor) –

Return type

torch.Tensor

get_params(image)[source]
Parameters

image (Optional[torch.Tensor]) –

Return type

Optional

class homura.vision.transforms.transform.RandomResizedCrop(size, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), target_type=None)[source]

Bases: homura.vision.transforms.transform.GeometricTransformBase

apply_coords(coords, original_wh, params)[source]
Parameters

coords (torch.Tensor) –

Return type

torch.Tensor

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

apply_mask(mask, params)[source]
Parameters

mask (torch.Tensor) –

Return type

torch.Tensor

get_params(image)[source]
Parameters

image (Optional[torch.Tensor]) –

Return type

Optional

class homura.vision.transforms.transform.RandomRotation(degrees, fill=None, mask_fill=255, target_type=None)[source]

Bases: homura.vision.transforms.transform.GeometricTransformBase

apply_coords(coords, original_wh, params)[source]
Parameters

coords (torch.Tensor) –

Return type

torch.Tensor

apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

apply_mask(mask, params)[source]
Parameters

mask (torch.Tensor) –

Return type

torch.Tensor

get_params(image)[source]
Parameters

image (Optional[torch.Tensor]) –

Return type

Optional

class homura.vision.transforms.transform.TransformBase(target_type)[source]

Bases: abc.ABC

Base class of data augmentation transformations. Transform is expected to be used as drop-in replacements of torchvision’s transforms.

train_da = CenterCrop(224, task="segmentation") * ColorJitter(task="segmentation") + …

apply_bbox(bbox, params, original_wh)[source]
Parameters
  • bbox (torch.Tensor) –

  • original_wh (Tuple[int, int]) –

Return type

torch.Tensor

abstract apply_coords(coords, original_wh, params)[source]
Parameters
  • coords (torch.Tensor) –

  • original_wh (Tuple[int, int]) –

Return type

torch.Tensor

abstract apply_image(image, params)[source]
Parameters

image (torch.Tensor) –

Return type

torch.Tensor

abstract apply_mask(mask, params)[source]
Parameters

mask (torch.Tensor) –

Return type

torch.Tensor

static ensure_tensor(t, is_input)[source]
Parameters

is_input (bool) –

Return type

torch.Tensor

get_params(image)[source]
Parameters

image (Optional[torch.Tensor]) –

Return type

Optional

supported_target_types = {'bbox', 'mask'}

Module contents