Local variable 'hparam_d_steps_per_g_step' is assigned to but never used:
129 d_steps_per_g = hparam_d_steps_per_g_step = 1

Local variable 'num_classes' is assigned to but never used:
191 num_classes = d_out

Local variable 'hparam_d_steps_per_g_step' is assigned to but never used:
266 d_steps_per_g = hparam_d_steps_per_g_step = 1

Line too long (90 > 79 characters):
18 featurizer, classifier = initialize_model(config, d_out=d_out, is_featurizer=True)

Line too long (85 > 79 characters):
32 assert config.num_train_domains <= 1000 # domain space shouldn't be too large

Line too long (83 > 79 characters):
46 self.discriminator = torch.nn.Sequential(torch.nn.Linear(emb_dim, emb_dim),

Line too long (83 > 79 characters):
48 torch.nn.Linear(emb_dim, emb_dim),

Line too long (118 > 79 characters):
50 torch.nn.Linear(emb_dim, config.num_train_domains)).to(config.device)

Line too long (80 > 79 characters):
117 disc_loss = F.cross_entropy(disc_out, disc_labels, reduction='none')

Line too long (89 > 79 characters):
157 return self.loss.compute(results['y_pred'], results['y_true'], return_dict=False)

Line too long (115 > 79 characters):
166 featurizer, pooler, classifier = initialize_model(config, d_out=d_out, is_featurizer=True, is_pooled=False)

Line too long (85 > 79 characters):
181 assert config.num_train_domains <= 1000 # domain space shouldn't be too large

Line too long (99 > 79 characters):
194 self.discriminator_gnn = GNN_node(num_layer=2, emb_dim=emb_dim, dropout=0, batchnorm=False,

Line too long (111 > 79 characters):
195 dataset_group=config.model_kwargs['dataset_group']).to(config.device)

Line too long (101 > 79 characters):
198 self.discriminator_mlp = torch.nn.Linear(emb_dim, config.num_train_domains).to(config.device)

Line too long (85 > 79 characters):
201 # torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU(),

Line too long (88 > 79 characters):
202 # torch.nn.Linear(emb_dim, config.num_train_domains)

Line too long (89 > 79 characters):
294 return self.loss.compute(results['y_pred'], results['y_true'], return_dict=False)