File "/work/09235/kunjal/ls6/TypeT5/train_model.py", line 112, in <module>
wrapper = train_spot_model(
File "/work/09235/kunjal/ls6/TypeT5/typet5/train.py", line 180, in train_spot_model
trainer.fit(
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/trainer/trainer.py", line 520, in fit
call._call_and_handle_interrupt(
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/trainer/call.py", line 44, in _call_and_handle_interrupt
return trainer_fn(*args, **kwargs)
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/trainer/trainer.py", line 559, in _fit_impl
self._run(model, ckpt_path=ckpt_path)
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/trainer/trainer.py", line 935, in _run
results = self._run_stage()
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/trainer/trainer.py", line 978, in _run_stage
self.fit_loop.run()
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/loops/fit_loop.py", line 201, in run
self.advance()
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/loops/fit_loop.py", line 354, in advance
self.epoch_loop.run(self._data_fetcher)
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/loops/training_epoch_loop.py", line 133, in run
self.advance(data_fetcher)
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/loops/training_epoch_loop.py", line 218, in advance
batch_output = self.automatic_optimization.run(trainer.optimizers[0], kwargs)
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/loops/optimization/automatic.py", line 164, in run
closure = self._make_closure(kwargs, optimizer)
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/loops/optimization/automatic.py", line 197, in _make_closure
zero_grad_fn = self._make_zero_grad_fn(kwargs.get("batch_idx", 0), optimizer)
File "/home1/09235/kunjal/miniconda3/lib/python3.10/site-packages/pytorch_lightning/loops/optimization/automatic.py", line 213, in _make_zero_grad_fn
is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0
TypeError: unsupported operand type(s) for %: 'int' and 'NoneType'
I am trying to run train_model.py from https://github.com/utopia-group/TypeT5/blob/master/scripts/train_model.py and I get the error above. How can I fix it?
1 Answer
accumulate_grad_batches is probably null (or None) in your Lightning configuration. The traceback shows Lightning computing batch_idx % self.trainer.accumulate_grad_batches, which fails when the value is None instead of a positive integer. Change it in the config to accumulate_grad_batches: 1, as sketched below.
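If the value is set in Python rather than in a config file, the same fix applies where the Trainer is constructed. The snippet below is a minimal sketch, not TypeT5's actual code: the config dict and max_epochs value are placeholders, and the only point is that accumulate_grad_batches must be a positive int before trainer.fit() runs.

import pytorch_lightning as pl

# Hypothetical config dict standing in for whatever your training script reads;
# the key being None is exactly what triggers the TypeError in the traceback.
config = {"accumulate_grad_batches": None}

trainer = pl.Trainer(
    max_epochs=1,  # placeholder value
    # Lightning computes batch_idx % accumulate_grad_batches internally,
    # so fall back to 1 (no gradient accumulation) when the config leaves it as None.
    accumulate_grad_batches=config.get("accumulate_grad_batches") or 1,
)

Either way, once trainer.accumulate_grad_batches is an integer (1 means no gradient accumulation), the modulo in _make_zero_grad_fn no longer sees None and training proceeds.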