diff --git a/mmaction/models/action_segmentors/asformer.py b/mmaction/models/action_segmentors/asformer.py
index 534595a19c..5de3f4f0ba 100644
--- a/mmaction/models/action_segmentors/asformer.py
+++ b/mmaction/models/action_segmentors/asformer.py
@@ -167,6 +167,7 @@ def predict(self, batch_inputs, batch_data_samples, **kwargs):
         output = [dict(ground=ground, recognition=recognition)]
         return output
 
+
 def exponential_descrease(idx_decoder, p=3):
     return math.exp(-p * idx_decoder)
 
@@ -571,7 +572,8 @@ def forward(self, x, fencoder, mask):
 
 
 class MyTransformer(nn.Module):
-    """An encoder-decoder transformer"""
+    """An encoder-decoder transformer."""
+
     def __init__(self, num_decoders, num_layers, r1, r2, num_f_maps, input_dim,
                  num_classes, channel_masking_rate):
         super(MyTransformer, self).__init__()
@@ -601,7 +603,7 @@ def __init__(self, num_decoders, num_layers, r1, r2, num_f_maps, input_dim,
 
     def forward(self, x, mask):
         """Define the computation performed at every call.
-        
+
         Args:
             x (torch.Tensor): The input data.
         Returns:
@@ -616,4 +618,4 @@ def forward(self, x, mask):
                                    feature * mask[:, 0:1, :], mask)
             outputs = torch.cat((outputs, out.unsqueeze(0)), dim=0)
 
-        return outputs
\ No newline at end of file
+        return outputs
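
For reviewers' context: `exponential_descrease` (spelling kept as in the upstream ASFormer code) returns a weight that decays exponentially with the decoder index, and the last hunk suggests it is passed into each decoder call during the refinement loop. Below is a minimal standalone sketch of that behaviour, assuming only the function exactly as it appears in this patch; the decoder loop, masks, and tensors are omitted.

```python
import math


def exponential_descrease(idx_decoder, p=3):
    """Weight that decays exponentially with the decoder index."""
    return math.exp(-p * idx_decoder)


# Deeper decoders receive exponentially smaller weights:
# decoder 0 -> 1.0, decoder 1 -> ~0.0498, decoder 2 -> ~0.0025, decoder 3 -> ~0.0001
weights = [round(exponential_descrease(i), 4) for i in range(4)]
print(weights)  # [1.0, 0.0498, 0.0025, 0.0001]
```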