Safemotion Lib
Loading...
Searching...
No Matches
defaults.py
Go to the documentation of this file.
1from .config import CfgNode as CN
2import os
3# -----------------------------------------------------------------------------
4# Convention about Training / Test specific parameters
5# -----------------------------------------------------------------------------
6# Whenever an argument can be either used for training or for testing, the
7# corresponding name will be post-fixed by a _TRAIN for a training parameter,
8# or _TEST for a test-specific parameter.
9# For example, the number of images during training will be
10# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
11# IMAGES_PER_BATCH_TEST
12
13# -----------------------------------------------------------------------------
14# Config definition
15# -----------------------------------------------------------------------------
16
# Root config node. All defaults below hang off this object; downstream code
# clones/merges it rather than mutating this module-level instance directly.
_C = CN()

# -----------------------------------------------------------------------------
# MODEL
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# Device on which the model runs ("cuda" or "cpu").
_C.MODEL.DEVICE = "cuda"
# Name of the meta-architecture to build (resolved by the project's
# architecture registry — confirm available names there).
_C.MODEL.META_ARCHITECTURE = 'Baseline'
# Names of layers to freeze during training; the default [''] presumably
# matches nothing, i.e. freezes no layers — confirm against the trainer.
_C.MODEL.FREEZE_LAYERS = ['']
26
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()

# Backbone builder function name (resolved by the project's backbone registry).
_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
# Backbone depth tag, e.g. "50x" for ResNet-50.
_C.MODEL.BACKBONE.DEPTH = "50x"
# Stride of the last stage; 1 keeps a larger feature map (common for re-ID).
_C.MODEL.BACKBONE.LAST_STRIDE = 1
# Backbone output feature dimension (2048 for ResNet-50).
_C.MODEL.BACKBONE.FEAT_DIM = 2048
# Normalization method for the convolution layers.
_C.MODEL.BACKBONE.NORM = "BN"
# If True, use IBN blocks in the backbone.
_C.MODEL.BACKBONE.WITH_IBN = False
# If True, use SE (squeeze-and-excitation) blocks in the backbone.
_C.MODEL.BACKBONE.WITH_SE = False
# If True, use non-local blocks in the backbone.
_C.MODEL.BACKBONE.WITH_NL = False
# If True, initialize the backbone from an ImageNet-pretrained model.
_C.MODEL.BACKBONE.PRETRAIN = True
# Path to the pretrained weights; empty string presumably means "download/use
# the default pretrained checkpoint" — confirm in the backbone builder.
_C.MODEL.BACKBONE.PRETRAIN_PATH = ''
# If True, add an extra BN layer for unsupervised pretrained models.
_C.MODEL.BACKBONE.EXTRA_BN = False
51
# ---------------------------------------------------------------------------- #
# REID HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.HEADS = CN()
# Head builder name (resolved by the project's heads registry).
_C.MODEL.HEADS.NAME = "EmbeddingHead"
# Normalization method for the convolution layers.
_C.MODEL.HEADS.NORM = "BN"
# Number of identities (classification classes); 0 presumably means
# "infer from the dataset at build time" — confirm in the head builder.
_C.MODEL.HEADS.NUM_CLASSES = 0
# Embedding dimension in the head; 0 presumably means "use the backbone
# feature dimension unchanged" — confirm in the head builder.
_C.MODEL.HEADS.EMBEDDING_DIM = 0
# If True, use a BNNeck layer on the embedding.
_C.MODEL.HEADS.WITH_BNNECK = True
# Which feature feeds the metric (triplet) loss: taken before or after BNNeck.
_C.MODEL.HEADS.NECK_FEAT = "before" # options: before, after
# Pooling layer type used to collapse the feature map.
_C.MODEL.HEADS.POOL_LAYER = "avgpool"

# Classification layer type.
_C.MODEL.HEADS.CLS_LAYER = "linear" # "arcSoftmax" or "circleSoftmax"

# Margin and scale for the margin-based classification layers above
# (unused by the plain "linear" classifier).
_C.MODEL.HEADS.MARGIN = 0.15
_C.MODEL.HEADS.SCALE = 128
76
# ---------------------------------------------------------------------------- #
# REID LOSSES options
# ---------------------------------------------------------------------------- #
_C.MODEL.LOSSES = CN()
# Tuple of loss names to enable; each name selects one of the sub-configs below.
_C.MODEL.LOSSES.NAME = ("CrossEntropyLoss",)

# Cross Entropy Loss options
_C.MODEL.LOSSES.CE = CN()
# Label-smoothing epsilon:
#   epsilon == 0  -> no label smooth regularization,
#   epsilon == -1 -> adaptive label smooth regularization.
_C.MODEL.LOSSES.CE.EPSILON = 0.0
# Alpha hyper-parameter of the CE loss (semantics defined by the loss
# implementation — confirm there).
_C.MODEL.LOSSES.CE.ALPHA = 0.2
# Weight of this loss term in the total loss.
_C.MODEL.LOSSES.CE.SCALE = 1.0

# Triplet Loss options
_C.MODEL.LOSSES.TRI = CN()
# Triplet margin; NOTE(review): a margin of 0 likely selects soft-margin in the
# implementation — confirm before relying on it.
_C.MODEL.LOSSES.TRI.MARGIN = 0.3
# If True, L2-normalize features before computing distances.
_C.MODEL.LOSSES.TRI.NORM_FEAT = False
# If True, use batch-hard mining of positive/negative pairs.
_C.MODEL.LOSSES.TRI.HARD_MINING = True
# Weight of this loss term in the total loss.
_C.MODEL.LOSSES.TRI.SCALE = 1.0

# Circle Loss options
_C.MODEL.LOSSES.CIRCLE = CN()
_C.MODEL.LOSSES.CIRCLE.MARGIN = 0.25
_C.MODEL.LOSSES.CIRCLE.ALPHA = 128
# Weight of this loss term in the total loss.
_C.MODEL.LOSSES.CIRCLE.SCALE = 1.0

# Focal Loss options
_C.MODEL.LOSSES.FL = CN()
_C.MODEL.LOSSES.FL.ALPHA = 0.25
_C.MODEL.LOSSES.FL.GAMMA = 2
# Weight of this loss term in the total loss.
_C.MODEL.LOSSES.FL.SCALE = 1.0

# Path to a checkpoint file to be loaded to the model. You can find available models in the model zoo.
_C.MODEL.WEIGHTS = ""

# Per-channel mean used for image normalization, in 0-255 pixel scale
# (ImageNet RGB means scaled by 255).
_C.MODEL.PIXEL_MEAN = [0.485*255, 0.456*255, 0.406*255]
# Per-channel std used for image normalization, in 0-255 pixel scale.
_C.MODEL.PIXEL_STD = [0.229*255, 0.224*255, 0.225*255]
117
118
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the image during training, as [height, width].
_C.INPUT.SIZE_TRAIN = [256, 128]
# Size of the image during test, as [height, width].
_C.INPUT.SIZE_TEST = [256, 128]

# Random horizontal flip augmentation and its probability.
_C.INPUT.DO_FLIP = True
_C.INPUT.FLIP_PROB = 0.5

# Pad-then-crop augmentation: padding mode and padding size in pixels.
_C.INPUT.DO_PAD = True
_C.INPUT.PADDING_MODE = 'constant'
_C.INPUT.PADDING = 10
# Random color jitter: application probability and per-property jitter ranges.
_C.INPUT.CJ = CN()
_C.INPUT.CJ.ENABLED = False
_C.INPUT.CJ.PROB = 0.8
_C.INPUT.CJ.BRIGHTNESS = 0.15
_C.INPUT.CJ.CONTRAST = 0.15
_C.INPUT.CJ.SATURATION = 0.1
_C.INPUT.CJ.HUE = 0.1
# If True, apply auto-augmentation policies.
_C.INPUT.DO_AUTOAUG = False
# If True, apply AugMix augmentation.
_C.INPUT.DO_AUGMIX = False
# Random Erasing augmentation: probability and fill mean (0-255 scale).
_C.INPUT.REA = CN()
_C.INPUT.REA.ENABLED = False
_C.INPUT.REA.PROB = 0.5
_C.INPUT.REA.MEAN = [0.596*255, 0.558*255, 0.497*255] # [0.485*255, 0.456*255, 0.406*255]
# Random Patch augmentation: probability of applying it.
_C.INPUT.RPT = CN()
_C.INPUT.RPT.ENABLED = False
_C.INPUT.RPT.PROB = 0.5
157
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# Root directory of all datasets; overridable via the FASTREID_DATASETS
# environment variable, defaulting to "datasets".
_C.DATASETS.ROOT = os.getenv("FASTREID_DATASETS", "datasets")
# List of the dataset names for training.
_C.DATASETS.NAMES = ("Market1501",)
# List of the dataset names for testing.
_C.DATASETS.TESTS = ("Market1501",)
# Extra dataset-specific keyword arguments, encoded as a string
# (format defined by the dataset loader — confirm there).
_C.DATASETS.KWARGS = ''
# If True, combine train and test sets for joint training.
_C.DATASETS.COMBINEALL = False
# If True, the dataset is stored in LMDB format.
_C.DATASETS.IS_LMDB = False
174
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# If True, use a P/K sampler: P identities per batch, K instances each.
_C.DATALOADER.PK_SAMPLER = True
# If True, use a naive sampler that does not balance identities per batch.
_C.DATALOADER.NAIVE_WAY = False
# Number of instances (K) sampled for each person identity.
_C.DATALOADER.NUM_INSTANCE = 4
# Number of worker processes for data loading.
_C.DATALOADER.NUM_WORKERS = 8
186
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()

# If True, train with automatic mixed precision.
_C.SOLVER.AMP_ENABLED = False

# Optimizer class name.
_C.SOLVER.OPT = "Adam"

# Total training length; NOTE(review): the magnitude (120, with STEPS at
# 30/55) suggests this is counted in epochs despite the "ITER" name —
# confirm against the trainer.
_C.SOLVER.MAX_ITER = 120

# Base learning rate and per-group multipliers.
_C.SOLVER.BASE_LR = 3e-4
_C.SOLVER.BIAS_LR_FACTOR = 1.
_C.SOLVER.HEADS_LR_FACTOR = 1.
_C.SOLVER.BACKBONE_BN_LR_FACTOR = 1.

# Momentum (used by momentum-based optimizers such as SGD).
_C.SOLVER.MOMENTUM = 0.9

# Weight decay, with a separate value for bias parameters.
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.

# Multi-step learning rate options: scheduler name, decay factor, milestones.
_C.SOLVER.SCHED = "WarmupMultiStepLR"
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = [30, 55]

# Cosine annealing learning rate options: delay before annealing starts,
# and the minimum learning rate annealed to.
_C.SOLVER.DELAY_ITERS = 0
_C.SOLVER.ETA_MIN_LR = 3e-7

# Warmup options: starting LR factor, warmup length, and ramp shape.
_C.SOLVER.WARMUP_FACTOR = 0.1
_C.SOLVER.WARMUP_ITERS = 10
_C.SOLVER.WARMUP_METHOD = "linear"

# Number of initial iterations during which some layers stay frozen
# (see MODEL.FREEZE_LAYERS).
_C.SOLVER.FREEZE_ITERS = 0

# SWA (stochastic weight averaging) options.
_C.SOLVER.SWA = CN()
_C.SOLVER.SWA.ENABLED = False
_C.SOLVER.SWA.ITER = 10
_C.SOLVER.SWA.PERIOD = 2
_C.SOLVER.SWA.LR_FACTOR = 10.
_C.SOLVER.SWA.ETA_MIN_LR = 3.5e-6
# If True, use an LR schedule during the SWA phase.
_C.SOLVER.SWA.LR_SCHED = False

# Save a checkpoint every CHECKPOINT_PERIOD units (same unit as MAX_ITER).
_C.SOLVER.CHECKPOINT_PERIOD = 20

# Number of images per batch across all machines.
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 64
241
# ---------------------------------------------------------------------------- #
# Test (evaluation) options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()

# Run evaluation every EVAL_PERIOD units (same unit as SOLVER.CHECKPOINT_PERIOD).
_C.TEST.EVAL_PERIOD = 20

# Number of images per batch in one process.
# (Unlike SOLVER.IMS_PER_BATCH, this is per-process, not global.)
_C.TEST.IMS_PER_BATCH = 64
# Distance metric used for retrieval ranking.
_C.TEST.METRIC = "cosine"
# If True, also compute ROC statistics during evaluation.
_C.TEST.ROC_ENABLED = False

# Average query expansion: expand each query with its top QE_K neighbors,
# repeated QE_TIME times; ALPHA weights the expansion.
_C.TEST.AQE = CN()
_C.TEST.AQE.ENABLED = False
_C.TEST.AQE.ALPHA = 3.0
_C.TEST.AQE.QE_TIME = 1
_C.TEST.AQE.QE_K = 5

# k-reciprocal re-ranking parameters (k1, k2, lambda).
_C.TEST.RERANK = CN()
_C.TEST.RERANK.ENABLED = False
_C.TEST.RERANK.K1 = 20
_C.TEST.RERANK.K2 = 6
_C.TEST.RERANK.LAMBDA = 0.3

# Precise BatchNorm: re-estimate BN statistics on DATASET for NUM_ITER
# iterations before evaluation.
_C.TEST.PRECISE_BN = CN()
_C.TEST.PRECISE_BN.ENABLED = False
_C.TEST.PRECISE_BN.DATASET = 'Market1501'
_C.TEST.PRECISE_BN.NUM_ITER = 300
272
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Directory where logs and checkpoints are written.
_C.OUTPUT_DIR = "logs/"

# Benchmark different cudnn algorithms.
# If input images have very different sizes, this option will have large overhead
# for about 10k iterations. It usually hurts total time, but can benefit for certain models.
# If input images have the same or similar sizes, benchmark is often helpful.
_C.CUDNN_BENCHMARK = False