Python TypeError

I'm new to Python and I'm trying to reproduce a project from GitHub (DeepTTE). When I run the code in a Jupyter notebook I hit the error below, and nothing I've tried so far has fixed it.
C:\Users\Administrator\DeepTTE\main.py:129: DeprecationWarning: inspect.getargspec() is deprecated, use inspect.signature() or inspect.getfullargspec()
  model_args = inspect.getargspec(model_class.__init__).args
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
~\DeepTTE\main.py in <module>()
    159 
    160 if __name__ == '__main__':
--> 161     run()

~\DeepTTE\main.py in run()
    143 
    144     # model instance
--> 145     model = models.DeepTTE.Net(**kwargs)
    146 
    147     # experiment logger

~\DeepTTE\models\DeepTTE.py in __init__(self, kernel_size, num_filter, pooling_method, num_final_fcs, final_fc_size, alpha)
     89         self.alpha = alpha
     90 
---> 91         self.build()
     92         self.init_weight()
     93 

~\DeepTTE\models\DeepTTE.py in build(self)
    107                                                        kernel_size = self.kernel_size, \
    108                                                        num_filter = self.num_filter, \
--> 109                                                        pooling_method = self.pooling_method
    110         )
    111 

~\DeepTTE\SpatioTemporal.py in __init__(self, attr_size, kernel_size, num_filter, pooling_method, rnn)
     20         self.pooling_method = pooling_method
     21 
---> 22         self.geo_conv = GeoConv.Net(kernel_size = kernel_size, num_filter = num_filter)
     23     #num_filter: output size of each GeoConv + 1:distance of local path + attr_size: output size of attr component
     24         if rnn == 'lstm':

~\DeepTTE\GeoConv.py in __init__(self, kernel_size, num_filter)
     15         self.num_filter = num_filter
     16 
---> 17         self.build()
     18 
     19     def build(self):

~\DeepTTE\GeoConv.py in build(self)
     20         self.state_em = nn.Embedding(2, 2)
     21         self.process_coords = nn.Linear(4, 16)
---> 22         self.conv = nn.Conv1d(16, self.num_filter, self.kernel_size)
     23 
     24     def forward(self, traj, config):

D:\anaconda\lib\site-packages\torch\nn\modules\conv.py in __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, device, dtype)
    288         super(Conv1d, self).__init__(
    289             in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
--> 290             False, _single(0), groups, bias, padding_mode, **factory_kwargs)
    291 
    292     def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):

D:\anaconda\lib\site-packages\torch\nn\modules\conv.py in __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias, padding_mode, device, dtype)
    130         else:
    131             self.weight = Parameter(torch.empty(
--> 132                 (out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
    133         if bias:
    134             self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))

TypeError: empty(): argument 'size' must be tuple of ints, but found element of type NoneType at pos 3

I tried forcibly casting the kernel_size and out_channels arguments on line 132 to int, but I still get exactly the same error.
My guess is that the argument at pos 3 in this conv.py file has the wrong type, but my changes there didn't help either.
Could anyone take a look?

"must be tuple of ints" means the size argument here has to be a tuple whose elements are all ints, for example (1, 2, 3).
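
To check my reading of that message, I wrote a tiny standalone snippet (my own attempt at a minimal reproduction, not code from the project); putting None inside the size tuple gives the same kind of TypeError:

import torch

# All elements of the size tuple are ints: this works.
w = torch.empty((32, 16, 3))

# Third element is None instead of an int: same error as in my traceback.
# This seems to be what happens in conv.py when kernel_size ends up as None.
w = torch.empty((32, 16, None))
# TypeError: empty(): argument 'size' must be tuple of ints,
# but found element of type NoneType at pos 3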


# -*- coding: utf-8 -*-
import math
import warnings

import torch
from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter
from .. import functional as F
from .. import init
from .lazy import LazyModuleMixin
from .module import Module
from .utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch._torch_docs import reproducibility_notes

from ..common_types import _size_1_t, _size_2_t, _size_3_t
from typing import Optional, List, Tuple, Union

convolution_notes = \
    {"groups_note": r"""* :attr:`groups` controls the connections between inputs and outputs.
      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
      :attr:`groups`. For example,

        * At groups=1, all inputs are convolved to all outputs.
        * At groups=2, the operation becomes equivalent to having two conv
          layers side by side, each seeing half the input channels
          and producing half the output channels, and both subsequently
          concatenated.
        * At groups= :attr:`in_channels`, each input channel is convolved with
          its own set of filters (of size
          :math:`\frac{\text{out\_channels}}{\text{in\_channels}}`).""",

        "depthwise_separable_note": r"""When `groups == in_channels` and `out_channels == K * in_channels`,
        where `K` is a positive integer, this operation is also known as a "depthwise convolution".

        In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
        a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
        :math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`."""}  # noqa: B950





class _ConvNd(Module):

    __constants__ = ['stride', 'padding', 'dilation', 'groups',
                     'padding_mode', 'output_padding', 'in_channels',
                     'out_channels', 'kernel_size']
    __annotations__ = {'bias': Optional[torch.Tensor]}

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:
        ...

    _in_channels: int
    _reversed_padding_repeated_twice: List[int]
    out_channels: int
    kernel_size: Tuple[int, ...]
    stride: Tuple[int, ...]
    padding: Union[str, Tuple[int, ...]]
    dilation: Tuple[int, ...]
    transposed: bool
    output_padding: Tuple[int, ...]
    groups: int
    padding_mode: str
    weight: Tensor
    bias: Optional[Tensor]

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Tuple[int, ...],
                 stride: Tuple[int, ...],
                 padding: Tuple[int, ...],
                 dilation: Tuple[int, ...],
                 transposed: bool,
                 output_padding: Tuple[int, ...],
                 groups: int,
                 bias: bool,
                 padding_mode: str,
                 device=None,
                 dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(_ConvNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        valid_padding_strings = {'same', 'valid'}
        if isinstance(padding, str):
            if padding not in valid_padding_strings:
                raise ValueError(
                    "Invalid padding string {!r}, should be one of {}".format(
                        padding, valid_padding_strings))
            if padding == 'same' and any(s != 1 for s in stride):
                raise ValueError("padding='same' is not supported for strided convolutions")

        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if padding_mode not in valid_padding_modes:
            raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
                valid_padding_modes, padding_mode))
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        self.padding_mode = padding_mode
        # `_reversed_padding_repeated_twice` is the padding to be passed to
        # `F.pad` if needed (e.g., for non-zero padding types that are
        # implemented as two ops: padding + conv). `F.pad` accepts paddings in
        # reverse order than the dimension.
        if isinstance(self.padding, str):
            self._reversed_padding_repeated_twice = [0, 0] * len(kernel_size)
            if padding == 'same':
                for d, k, i in zip(dilation, kernel_size,
                                   range(len(kernel_size) - 1, -1, -1)):
                    total_padding = d * (k - 1)
                    left_pad = total_padding // 2
                    self._reversed_padding_repeated_twice[2 * i] = left_pad
                    self._reversed_padding_repeated_twice[2 * i + 1] = (
                        total_padding - left_pad)
        else:
            self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)

        if transposed:
            self.weight = Parameter(torch.empty(
                (in_channels, out_channels // groups, *kernel_size), **factory_kwargs))
        else:
            self.weight = Parameter(torch.empty(
                (out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
        if bias:
            self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()

I've pasted part of the code above. Could someone help me figure out where I need to make the change? My own edits still don't seem to fix it.
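
For what it's worth, here is the kind of quick standalone check I was planning to run next (my own debugging snippet with made-up numbers, not the project's configuration), mimicking the nn.Conv1d call in GeoConv.build:

import torch.nn as nn

# 16 input channels match the output size of process_coords in GeoConv.py;
# 32 is just a placeholder value I picked for num_filter.
conv_ok = nn.Conv1d(16, 32, 3)      # works when kernel_size is an int

conv_bad = nn.Conv1d(16, 32, None)  # raises the same TypeError at pos 3,
                                    # which makes me think the None arrives as
                                    # kernel_size from the arguments passed down
                                    # through main.py / DeepTTE.Net, rather than
                                    # coming from a bug inside torch's conv.py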