Line too long (106 > 79 characters):
3 from utils import save_model, save_pred, get_pred_prefix, get_model_prefix, detach_and_clone, collate_list
Line too long (82 > 79 characters):
24 # Using enumerate(iterator) can sometimes leak memory in some environments (!)
Line too long (84 > 79 characters):
27 iterator = tqdm(dataset['loader']) if config.progress_bar else dataset['loader']
Line too long (87 > 79 characters):
43 y_pred = process_outputs_functions[config.process_outputs_function](y_pred)
Line too long (83 > 79 characters):
67 # log after updating the scheduler in case it needs to access the internal logs
Line too long (101 > 79 characters):
79 def train(algorithm, datasets, general_logger, result_logger, config, epoch_offset, best_val_metric):
Line too long (90 > 79 characters):
85 run_epoch(algorithm, datasets['train'], general_logger, epoch, config, train=True)
Line too long (111 > 79 characters):
88 val_results, y_pred = run_epoch(algorithm, datasets['val'], general_logger, epoch, config, train=False)
Line too long (88 > 79 characters):
90 general_logger.write(f'Validation {config.val_metric}: {curr_val_metric:.3f}\n')
Line too long (96 > 79 characters):
102 general_logger.write(f'Epoch {epoch} has the best validation performance so far.\n')
Line too long (97 > 79 characters):
106 save_model_if_needed(algorithm, datasets['val'], epoch, config, is_best, best_val_metric)
Line too long (101 > 79 characters):
111 additional_splits = [split for split in datasets.keys() if split not in ['train', 'val']]
Line too long (105 > 79 characters):
115 _, y_pred = run_epoch(algorithm, datasets[split], general_logger, epoch, config, train=False)
Line too long (80 > 79 characters):
116 save_pred_if_needed(y_pred, datasets[split], epoch, config, is_best)
Line too long (89 > 79 characters):
122 def evaluate(algorithm, datasets, epoch, general_logger, result_logger, config, is_best):
Line too long (82 > 79 characters):
126 if (not config.evaluate_all_splits) and (split not in config.eval_splits):
Line too long (88 > 79 characters):
131 iterator = tqdm(dataset['loader']) if config.progress_bar else dataset['loader']
Line too long (91 > 79 characters):
137 y_pred = process_outputs_functions[config.process_outputs_function](y_pred)
Line too long (85 > 79 characters):
155 # Skip saving train preds, since the train loader generally shuffles the data
Line too long (95 > 79 characters):
157 save_pred_if_needed(epoch_y_pred, dataset, epoch, config, is_best, force_save=True)
Line too long (83 > 79 characters):
171 def save_pred_if_needed(y_pred, dataset, epoch, config, is_best, force_save=False):
Line too long (96 > 79 characters):
174 if force_save or (config.save_step is not None and (epoch + 1) % config.save_step == 0):
Line too long (86 > 79 characters):
182 def save_model_if_needed(algorithm, dataset, epoch, config, is_best, best_val_metric):
Line too long (89 > 79 characters):
185 save_model(algorithm, epoch, best_val_metric, prefix, f'epoch-{epoch}_model.pth')
Line too long (85 > 79 characters):
187 save_model(algorithm, epoch, best_val_metric, prefix, 'epoch-last_model.pth')
Line too long (85 > 79 characters):
189 save_model(algorithm, epoch, best_val_metric, prefix, 'epoch-best_model.pth')
Too many blank lines (2):
36 # These tensors are already detached, but we need to clone them again
Too many blank lines (2):
93 if best_val_metric is None:
Too many blank lines (3):
106 save_model_if_needed(algorithm, datasets['val'], epoch, config, is_best, best_val_metric)
Too many blank lines (3):
122 def evaluate(algorithm, datasets, epoch, general_logger, result_logger, config, is_best):