Skip to content

Commit f1fb9b4

Browse files
committed
Merge pull request VainF#76 from upstream
2 parents 4e1087d + 48f990b commit f1fb9b4

15 files changed

Lines changed: 170 additions & 147 deletions

File tree

datasets/cityscapes.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -90,15 +90,15 @@ def __init__(self, root, split='train', mode='fine', target_type='semantic', tra
9090
if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir):
9191
raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the'
9292
' specified "split" and "mode" are inside the "root" directory')
93-
93+
9494
for city in os.listdir(self.images_dir):
9595
img_dir = os.path.join(self.images_dir, city)
9696
target_dir = os.path.join(self.targets_dir, city)
9797

9898
for file_name in os.listdir(img_dir):
9999
self.images.append(os.path.join(img_dir, file_name))
100-
target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0],
101-
self._get_target_suffix(self.mode, self.target_type))
100+
target_name = f"{file_name.split('_leftImg8bit')[0]}_{self._get_target_suffix(self.mode, self.target_type)}"
101+
102102
self.targets.append(os.path.join(target_dir, target_name))
103103

104104
@classmethod
@@ -135,13 +135,13 @@ def _load_json(self, path):
135135
return data
136136

137137
def _get_target_suffix(self, mode, target_type):
138-
if target_type == 'instance':
139-
return '{}_instanceIds.png'.format(mode)
140-
elif target_type == 'semantic':
141-
return '{}_labelIds.png'.format(mode)
142-
elif target_type == 'color':
143-
return '{}_color.png'.format(mode)
144-
elif target_type == 'polygon':
145-
return '{}_polygons.json'.format(mode)
138+
if target_type == 'color':
139+
return f'{mode}_color.png'
146140
elif target_type == 'depth':
147-
return '{}_disparity.png'.format(mode)
141+
return f'{mode}_disparity.png'
142+
elif target_type == 'instance':
143+
return f'{mode}_instanceIds.png'
144+
elif target_type == 'polygon':
145+
return f'{mode}_polygons.json'
146+
elif target_type == 'semantic':
147+
return f'{mode}_labelIds.png'

datasets/utils.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -38,9 +38,7 @@ def makedir_exist_ok(dirpath):
3838
try:
3939
os.makedirs(dirpath)
4040
except OSError as e:
41-
if e.errno == errno.EEXIST:
42-
pass
43-
else:
41+
if e.errno != errno.EEXIST:
4442
raise
4543

4644

@@ -63,10 +61,10 @@ def download_url(url, root, filename=None, md5=None):
6361

6462
# downloads file
6563
if os.path.isfile(fpath) and check_integrity(fpath, md5):
66-
print('Using downloaded and verified file: ' + fpath)
64+
print(f'Using downloaded and verified file: {fpath}')
6765
else:
6866
try:
69-
print('Downloading ' + url + ' to ' + fpath)
67+
print(f'Downloading {url} to {fpath}')
7068
urllib.request.urlretrieve(
7169
url, fpath,
7270
reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True))

datasets/voc.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -93,14 +93,14 @@ def __init__(self,
9393
if year=='2012_aug':
9494
is_aug = True
9595
year = '2012'
96-
96+
9797
self.root = os.path.expanduser(root)
9898
self.year = year
9999
self.url = DATASET_YEAR_DICT[year]['url']
100100
self.filename = DATASET_YEAR_DICT[year]['filename']
101101
self.md5 = DATASET_YEAR_DICT[year]['md5']
102102
self.transform = transform
103-
103+
104104
self.image_set = image_set
105105
base_dir = DATASET_YEAR_DICT[year]['base_dir']
106106
voc_root = os.path.join(self.root, base_dir)
@@ -112,7 +112,7 @@ def __init__(self,
112112
if not os.path.isdir(voc_root):
113113
raise RuntimeError('Dataset not found or corrupted.' +
114114
' You can use download=True to download it')
115-
115+
116116
if is_aug and image_set=='train':
117117
mask_dir = os.path.join(voc_root, 'SegmentationClassAug')
118118
assert os.path.exists(mask_dir), "SegmentationClassAug not found, please refer to README.md and prepare it manually"
@@ -129,9 +129,9 @@ def __init__(self,
129129

130130
with open(os.path.join(split_f), "r") as f:
131131
file_names = [x.strip() for x in f.readlines()]
132-
133-
self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
134-
self.masks = [os.path.join(mask_dir, x + ".png") for x in file_names]
132+
133+
self.images = [os.path.join(image_dir, f'{x}.jpg') for x in file_names]
134+
self.masks = [os.path.join(mask_dir, f'{x}.png') for x in file_names]
135135
assert (len(self.images) == len(self.masks))
136136

137137
def __getitem__(self, index):

main.py

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -32,10 +32,15 @@ def get_argparser():
3232
help="num classes (default: None)")
3333

3434
# Deeplab Options
35-
available_models = sorted(name for name in network.modeling.__dict__ if name.islower() and \
36-
not (name.startswith("__") or name.startswith('_')) and callable(
37-
network.modeling.__dict__[name])
38-
)
35+
available_models = sorted(
36+
name
37+
for name in network.modeling.__dict__
38+
if name.islower()
39+
and not name.startswith("__")
40+
and not name.startswith('_')
41+
and callable(network.modeling.__dict__[name])
42+
)
43+
3944
parser.add_argument("--model", type=str, default='deeplabv3plus_mobilenet',
4045
choices=available_models, help='model name')
4146
parser.add_argument("--separable_conv", action='store_true', default=False,
@@ -281,7 +286,7 @@ def save_ckpt(path):
281286
"scheduler_state": scheduler.state_dict(),
282287
"best_score": best_score,
283288
}, path)
284-
print("Model saved as %s" % path)
289+
print(f"Model saved as {path}")
285290

286291
utils.mkdir('checkpoints')
287292
# Restore

metrics/stream_metrics.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,11 +48,10 @@ def to_str(results):
4848

4949
def _fast_hist(self, label_true, label_pred):
5050
mask = (label_true >= 0) & (label_true < self.n_classes)
51-
hist = np.bincount(
51+
return np.bincount(
5252
self.n_classes * label_true[mask].astype(int) + label_pred[mask],
5353
minlength=self.n_classes ** 2,
5454
).reshape(self.n_classes, self.n_classes)
55-
return hist
5655

5756
def get_results(self):
5857
"""Returns accuracy score evaluation result.
@@ -85,7 +84,7 @@ def reset(self):
8584
class AverageMeter(object):
8685
"""Computes average values"""
8786
def __init__(self):
88-
self.book = dict()
87+
self.book = {}
8988

9089
def reset_all(self):
9190
self.book.clear()

network/_deeplab.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -134,11 +134,13 @@ class ASPP(nn.Module):
134134
def __init__(self, in_channels, atrous_rates):
135135
super(ASPP, self).__init__()
136136
out_channels = 256
137-
modules = []
138-
modules.append(nn.Sequential(
139-
nn.Conv2d(in_channels, out_channels, 1, bias=False),
140-
nn.BatchNorm2d(out_channels),
141-
nn.ReLU(inplace=True)))
137+
modules = [
138+
nn.Sequential(
139+
nn.Conv2d(in_channels, out_channels, 1, bias=False),
140+
nn.BatchNorm2d(out_channels),
141+
nn.ReLU(inplace=True),
142+
)
143+
]
142144

143145
rate1, rate2, rate3 = tuple(atrous_rates)
144146
modules.append(ASPPConv(in_channels, out_channels, rate1))
@@ -155,9 +157,7 @@ def __init__(self, in_channels, atrous_rates):
155157
nn.Dropout(0.1),)
156158

157159
def forward(self, x):
158-
res = []
159-
for conv in self.convs:
160-
res.append(conv(x))
160+
res = [conv(x) for conv in self.convs]
161161
res = torch.cat(res, dim=1)
162162
return self.project(res)
163163

network/backbone/hrnetv2.py

Lines changed: 26 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -127,16 +127,28 @@ def __init__(self, stage, output_branches, c):
127127
nn.Upsample(scale_factor=(2.0 ** (branch_number - branch_output_number)), mode='nearest'),
128128
))
129129
elif branch_number < branch_output_number:
130-
downsampling_fusion = []
131-
for _ in range(branch_output_number - branch_number - 1):
132-
downsampling_fusion.append(nn.Sequential(
133-
nn.Conv2d(c * (2 ** branch_number), c * (2 ** branch_number), kernel_size=3, stride=2,
134-
padding=1,
135-
bias=False),
136-
nn.BatchNorm2d(c * (2 ** branch_number), eps=1e-05, momentum=0.1, affine=True,
137-
track_running_stats=True),
130+
downsampling_fusion = [
131+
nn.Sequential(
132+
nn.Conv2d(
133+
c * (2 ** branch_number),
134+
c * (2 ** branch_number),
135+
kernel_size=3,
136+
stride=2,
137+
padding=1,
138+
bias=False,
139+
),
140+
nn.BatchNorm2d(
141+
c * (2 ** branch_number),
142+
eps=1e-05,
143+
momentum=0.1,
144+
affine=True,
145+
track_running_stats=True,
146+
),
138147
nn.ReLU(inplace=True),
139-
))
148+
)
149+
for _ in range(branch_output_number - branch_number - 1)
150+
]
151+
140152
downsampling_fusion.append(nn.Sequential(
141153
nn.Conv2d(c * (2 ** branch_number), c * (2 ** branch_output_number), kernel_size=3,
142154
stride=2, padding=1,
@@ -233,15 +245,17 @@ def __init__(self, c=48, num_blocks=[1, 4, 3], num_classes=1000):
233245

234246
# Classifier (extra module if want to use for classification):
235247
# pool, reduce dimensionality, flatten, connect to linear layer for classification:
236-
out_channels = sum([c * 2 ** i for i in range(len(num_blocks)+1)]) # total output channels of HRNetV2
248+
out_channels = sum(c * 2 ** i for i in range(len(num_blocks)+1))
237249
pool_feature_map = 8
238250
self.bn_classifier = nn.Sequential(
239251
nn.Conv2d(out_channels, out_channels // 4, kernel_size=1, bias=False),
240-
nn.BatchNorm2d(out_channels // 4, eps=1e-05, affine=True, track_running_stats=True),
252+
nn.BatchNorm2d(
253+
out_channels // 4, eps=1e-05, affine=True, track_running_stats=True
254+
),
241255
nn.ReLU(inplace=True),
242256
nn.AdaptiveAvgPool2d(pool_feature_map),
243257
nn.Flatten(),
244-
nn.Linear(pool_feature_map * pool_feature_map * (out_channels // 4), num_classes),
258+
nn.Linear(pool_feature_map ** 2 * (out_channels // 4), num_classes),
245259
)
246260

247261
@staticmethod

network/backbone/mobilenetv2.py

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -76,10 +76,7 @@ def __init__(self, inp, oup, stride, dilation, expand_ratio):
7676

7777
def forward(self, x):
7878
x_pad = F.pad(x, self.input_padding)
79-
if self.use_res_connect:
80-
return x + self.conv(x_pad)
81-
else:
82-
return self.conv(x_pad)
79+
return x + self.conv(x_pad) if self.use_res_connect else self.conv(x_pad)
8380

8481
class MobileNetV2(nn.Module):
8582
def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
@@ -95,10 +92,7 @@ def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_r
9592
"""
9693
super(MobileNetV2, self).__init__()
9794
block = InvertedResidual
98-
input_channel = 32
99-
last_channel = 1280
10095
self.output_stride = output_stride
101-
current_stride = 1
10296
if inverted_residual_setting is None:
10397
inverted_residual_setting = [
10498
# t, c, n, s
@@ -113,14 +107,18 @@ def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_r
113107

114108
# only check the first element, assuming user knows t,c,n,s are required
115109
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
116-
raise ValueError("inverted_residual_setting should be non-empty "
117-
"or a 4-element list, got {}".format(inverted_residual_setting))
110+
raise ValueError(
111+
f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
112+
)
113+
118114

115+
input_channel = 32
119116
# building first layer
120117
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
118+
last_channel = 1280
121119
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
122120
features = [ConvBNReLU(3, input_channel, stride=2)]
123-
current_stride *= 2
121+
current_stride = 1 * 2
124122
dilation=1
125123
previous_dilation = 1
126124

network/backbone/resnet.py

Lines changed: 28 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -135,8 +135,10 @@ def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
135135
# the 2x2 stride with a dilated convolution instead
136136
replace_stride_with_dilation = [False, False, False]
137137
if len(replace_stride_with_dilation) != 3:
138-
raise ValueError("replace_stride_with_dilation should be None "
139-
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
138+
raise ValueError(
139+
f"replace_stride_with_dilation should be None or a 3-element tuple, got {replace_stride_with_dilation}"
140+
)
141+
140142
self.groups = groups
141143
self.base_width = width_per_group
142144
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
@@ -184,14 +186,31 @@ def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
184186
norm_layer(planes * block.expansion),
185187
)
186188

187-
layers = []
188-
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
189-
self.base_width, previous_dilation, norm_layer))
189+
layers = [
190+
block(
191+
self.inplanes,
192+
planes,
193+
stride,
194+
downsample,
195+
self.groups,
196+
self.base_width,
197+
previous_dilation,
198+
norm_layer,
199+
)
200+
]
201+
190202
self.inplanes = planes * block.expansion
191-
for _ in range(1, blocks):
192-
layers.append(block(self.inplanes, planes, groups=self.groups,
193-
base_width=self.base_width, dilation=self.dilation,
194-
norm_layer=norm_layer))
203+
layers.extend(
204+
block(
205+
self.inplanes,
206+
planes,
207+
groups=self.groups,
208+
base_width=self.base_width,
209+
dilation=self.dilation,
210+
norm_layer=norm_layer,
211+
)
212+
for _ in range(1, blocks)
213+
)
195214

196215
return nn.Sequential(*layers)
197216

0 commit comments

Comments (0)