The batch size of images_unsup_strong is fixed at 1, which raises the following error:
    preds_student_unsup = model(images_unsup_strong)
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/parallel/distributed.py", line 886, in forward
    output = self.module(*inputs[0], **kwargs[0])
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/media/zxr409/58fab247-2ac6-40d5-9db7-af402531c0c4/zxq/work/semi-seg/SemiSeg-AEL-main/semseg/models/model_helper.py", line 48, in forward
    pred_head = self.decoder([f1, f2,feat1, feat2])
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/media/zxr409/58fab247-2ac6-40d5-9db7-af402531c0c4/zxq/work/semi-seg/SemiSeg-AEL-main/semseg/models/decoder.py", line 54, in forward
    aspp_out = self.aspp(x4)
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/media/zxr409/58fab247-2ac6-40d5-9db7-af402531c0c4/zxq/work/semi-seg/SemiSeg-AEL-main/semseg/models/base.py", line 47, in forward
    feat1 = F.upsample(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True)
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/modules/container.py", line 141, in forward
    input = module(input)
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/modules/batchnorm.py", line 179, in forward
    self.eps,
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/functional.py", line 2280, in batch_norm
    _verify_batch_size(input.size())
  File "/home/zxr409/anaconda3/envs/zxq_cvpod/lib/python3.7/site-packages/torch/nn/functional.py", line 2248, in _verify_batch_size
    raise ValueError("Expected more than 1 value per channel when training, got input size {}".format(size))
ValueError: Expected more than 1 value per channel when training, got input size torch.Size([1, 256, 1, 1])
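For context, the failure happens because BatchNorm in training mode needs more than one value per channel to compute batch statistics, and the ASPP image-pooling branch reduces each image to a 1x1 feature map; with a batch of a single image the input to that BatchNorm is [1, 256, 1, 1]. One common workaround (not from this repository's code, just a sketch) is to make sure the unsupervised loader never yields a batch of size 1, e.g. by passing drop_last=True. The dataset and shapes below are placeholders used only so the snippet runs on its own:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical stand-in for the unlabeled dataset; 9 images of 3x513x513
# are used only so this example is self-contained.
unsup_dataset = TensorDataset(torch.randn(9, 3, 513, 513))

# drop_last=True discards the trailing incomplete batch (here the 9th image
# when batch_size=4), so a training-mode BatchNorm never receives a batch
# of size 1.
unsup_loader = DataLoader(unsup_dataset, batch_size=4, shuffle=True, drop_last=True)

for (images_unsup_strong,) in unsup_loader:
    # Every batch now contains at least 2 images, so the per-channel
    # statistics in the ASPP image-pooling branch are well defined.
    assert images_unsup_strong.size(0) > 1
```

If the loader cannot be changed, alternatives are to skip iterations where images_unsup_strong.size(0) < 2, or to swap the BatchNorm layers in the image-pooling branch for GroupNorm, which does not depend on the batch dimension; both are workarounds rather than fixes from the original code.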