OOM log: CUDA out of memory while running learn.fine_tune(4, 1e-2) on a fastai AWD-LSTM text classifier (8 GB GPU)
epoch  train_loss  valid_loss  accuracy  time
0      0.613234    0.415272    0.808160  01:53

epoch  train_loss  valid_loss  accuracy  time
0      0.000000                          00:01
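
For context, a minimal sketch of the kind of fastai v2 setup that produces a log like this. The dataset, path, and learner construction are assumptions inferred from the traceback below (a text classifier built on AWD_LSTM); only the learn.fine_tune(4, 1e-2) call is taken from the log itself:

    # Hedged reconstruction: dataset and learner details are assumptions.
    from fastai.text.all import *

    path = untar_data(URLs.IMDB)                           # hypothetical dataset choice
    dls = TextDataLoaders.from_folder(path, valid='test')  # assumed dataloaders
    learn = text_classifier_learner(dls, AWD_LSTM, metrics=accuracy)

    # fine_tune runs freeze_epochs (default 1) frozen epochs first (the first
    # table above), then unfreezes and calls fit_one_cycle, which is where the
    # out-of-memory error below is raised.
    learn.fine_tune(4, 1e-2)
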
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-5-99ae1f5d705e> in <module>
----> 1 learn.fine_tune(4, 1e-2)

~/Documents/github/fastcore/fastcore/logargs.py in _f(*args, **kwargs)
54 init_args.update(log)
55 setattr(inst, 'init_args', init_args)
---> 56 return inst if to_return else f(*args, **kwargs)
57 return _f

~/Documents/github/fastai/fastai/callback/schedule.py in fine_tune(self, epochs, base_lr, freeze_epochs, lr_mult, pct_start, div, **kwargs)
162 base_lr /= 2
163 self.unfreeze()
--> 164 self.fit_one_cycle(epochs, slice(base_lr/lr_mult, base_lr), pct_start=pct_start, div=div, **kwargs)
165
166 # Cell

~/Documents/github/fastcore/fastcore/logargs.py in _f(*args, **kwargs)
54 init_args.update(log)
55 setattr(inst, 'init_args', init_args)
---> 56 return inst if to_return else f(*args, **kwargs)
57 return _f

~/Documents/github/fastai/fastai/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
111 scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
112 'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
--> 113 self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
114
115 # Cell

~/Documents/github/fastcore/fastcore/logargs.py in _f(*args, **kwargs)
54 init_args.update(log)
55 setattr(inst, 'init_args', init_args)
---> 56 return inst if to_return else f(*args, **kwargs)
57 return _f

~/Documents/github/fastai/fastai/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
205 self.opt.set_hypers(lr=self.lr if lr is None else lr)
206 self.n_epoch,self.loss = n_epoch,tensor(0.)
--> 207 self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
208
209 def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None

~/Documents/github/fastai/fastai/learner.py in _with_events(self, f, event_type, ex, final)
153
154 def _with_events(self, f, event_type, ex, final=noop):
--> 155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')
157 finally: self(f'after_{event_type}') ;final()

~/Documents/github/fastai/fastai/learner.py in _do_fit(self)
195 for epoch in range(self.n_epoch):
196 self.epoch=epoch
--> 197 self._with_events(self._do_epoch, 'epoch', CancelEpochException)
198
199 @log_args(but='cbs')

~/Documents/github/fastai/fastai/learner.py in _with_events(self, f, event_type, ex, final)
153
154 def _with_events(self, f, event_type, ex, final=noop):
--> 155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')
157 finally: self(f'after_{event_type}') ;final()

~/Documents/github/fastai/fastai/learner.py in _do_epoch(self)
189
190 def _do_epoch(self):
--> 191 self._do_epoch_train()
192 self._do_epoch_validate()
193

~/Documents/github/fastai/fastai/learner.py in _do_epoch_train(self)
181 def _do_epoch_train(self):
182 self.dl = self.dls.train
--> 183 self._with_events(self.all_batches, 'train', CancelTrainException)
184
185 def _do_epoch_validate(self, ds_idx=1, dl=None):

~/Documents/github/fastai/fastai/learner.py in _with_events(self, f, event_type, ex, final)
153
154 def _with_events(self, f, event_type, ex, final=noop):
--> 155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')
157 finally: self(f'after_{event_type}') ;final()

~/Documents/github/fastai/fastai/learner.py in all_batches(self)
159 def all_batches(self):
160 self.n_iter = len(self.dl)
--> 161 for o in enumerate(self.dl): self.one_batch(*o)
162
163 def _do_one_batch(self):

~/Documents/github/fastai/fastai/learner.py in one_batch(self, i, b)
177 self.iter = i
178 self._split(b)
--> 179 self._with_events(self._do_one_batch, 'batch', CancelBatchException)
180
181 def _do_epoch_train(self):

~/Documents/github/fastai/fastai/learner.py in _with_events(self, f, event_type, ex, final)
153
154 def _with_events(self, f, event_type, ex, final=noop):
--> 155 try: self(f'before_{event_type}') ;f()
156 except ex: self(f'after_cancel_{event_type}')
157 finally: self(f'after_{event_type}') ;final()

~/Documents/github/fastai/fastai/learner.py in _do_one_batch(self)
162
163 def _do_one_batch(self):
--> 164 self.pred = self.model(*self.xb)
165 self('after_pred')
166 if len(self.yb): self.loss = self.loss_func(self.pred, *self.yb)

~/miniconda3/envs/fastai/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),

~/miniconda3/envs/fastai/lib/python3.8/site-packages/torch/nn/modules/container.py in forward(self, input)
115 def forward(self, input):
116 for module in self:
--> 117 input = module(input)
118 return input
119

~/miniconda3/envs/fastai/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),

~/Documents/github/fastai/fastai/text/models/core.py in forward(self, input)
79 #Note: this expects that sequence really begins on a round multiple of bptt
80 real_bs = (input[:,i] != self.pad_idx).long().sum()
---> 81 o = self.module(input[:real_bs,i: min(i+self.bptt, sl)])
82 if self.max_len is None or sl-i <= self.max_len:
83 outs.append(o)

~/miniconda3/envs/fastai/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),

~/Documents/github/fastai/fastai/text/models/awdlstm.py in forward(self, inp, from_embeds)
104 new_hidden = []
105 for l, (rnn,hid_dp) in enumerate(zip(self.rnns, self.hidden_dps)):
--> 106 output, new_h = rnn(output, self.hidden[l])
107 new_hidden.append(new_h)
108 if l != self.n_layers - 1: output = hid_dp(output)

~/miniconda3/envs/fastai/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
720 result = self._slow_forward(*input, **kwargs)
721 else:
--> 722 result = self.forward(*input, **kwargs)
723 for hook in itertools.chain(
724 _global_forward_hooks.values(),

~/Documents/github/fastai/fastai/text/models/awdlstm.py in forward(self, *args)
51 # To avoid the warning that comes because the weights aren't flattened.
52 warnings.simplefilter("ignore", category=UserWarning)
---> 53 return self.module.forward(*args)
54
55 def reset(self):

~/miniconda3/envs/fastai/lib/python3.8/site-packages/torch/nn/modules/rnn.py in forward(self, input, hx)
574 self.check_forward_args(input, hx, batch_sizes)
575 if batch_sizes is None:
--> 576 result = _VF.lstm(input, hx, self._flat_weights, self.bias, self.num_layers,
577 self.dropout, self.training, self.bidirectional, self.batch_first)
578 else:

RuntimeError: CUDA out of memory. Tried to allocate 102.00 MiB (GPU 0; 7.79 GiB total capacity; 6.44 GiB already allocated; 86.12 MiB free; 6.61 GiB reserved in total by PyTorch)
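
Reading the error: the allocator asked for 102.00 MiB but only 86.12 MiB remained free on the ~8 GiB card, with 6.61 GiB already reserved by PyTorch's caching allocator, so the request cannot be satisfied. A minimal mitigation sketch, assuming the learner setup from the reconstruction above; the specific values for bs, seq_len, and n_acc are illustrative, not tuned:

    # Hedged mitigation sketch: bs, seq_len, and n_acc are illustrative values.
    import torch
    from fastai.text.all import *

    torch.cuda.empty_cache()  # release cached blocks held by PyTorch's allocator

    path = untar_data(URLs.IMDB)  # hypothetical dataset, as in the sketch above
    # Smaller batches and a shorter BPTT window shrink peak activation memory.
    dls = TextDataLoaders.from_folder(path, valid='test', bs=32, seq_len=40)
    # Mixed precision roughly halves activation memory on supported GPUs.
    learn = text_classifier_learner(dls, AWD_LSTM, metrics=accuracy).to_fp16()
    # Gradient accumulation recovers a larger effective batch (here 64 samples)
    # from the smaller per-batch bs, stepping the optimizer every n_acc samples.
    learn.fine_tune(4, 1e-2, cbs=GradientAccumulation(n_acc=64))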