Added comment

This commit is contained in:
Zhengyi Chen 2024-02-05 21:51:17 +00:00
parent 8e0e82f67a
commit b6d2460060
2 changed files with 31 additions and 17 deletions

View file

@ -82,7 +82,7 @@ class PerspectiveEstimator(nn.Module):
self.epsilon = epsilon
self.input_shape = input_shape
self.layer_dict = nn.ModuleDict({
'dilated_conv': nn.Conv2d(
'revpers_dilated_conv0': nn.Conv2d(
in_channels=self.input_shape[1], out_channels=1,
kernel_size=conv_kernel_shape,
padding=conv_padding,
@ -90,11 +90,11 @@ class PerspectiveEstimator(nn.Module):
stride=conv_stride,
dilation=conv_dilation,
), # (N, 1, H, W)
'avg_pooling': nn.AdaptiveAvgPool2d(
'revpers_avg_pooling0': nn.AdaptiveAvgPool2d(
output_size=(pool_capacity, 1)
), # (N, 1, K, 1)
# [?] Do we need to explicitly reshape/flatten (N, 1, K, 1) to (N, K) here before the Linear layer?
'fc': nn.Linear(
'revpers_fc0': nn.Linear(
in_features=pool_capacity,
out_features=1,
),
@ -115,3 +115,5 @@ class PerspectiveEstimator(nn.Module):
# def unsupervised_loss(predictions, targets):
# [TODO] We need a modified loss -- one that takes advantage of the attention map instead
# of the feature map. They should behave similarly, but this needs verification.