Line too long (135 > 79 characters):
12 featurizer, projector, classifier, combined_model = initialize_model(config, d_out, is_featurizer=True, include_projector=True)
Line too long (86 > 79 characters):
71 idx_nondrop = idx_perm[drop_num:].sort().values.cuda() # sort for humans/debug
Line too long (87 > 79 characters):
78 src_subselect = torch.nonzero(idx_nondrop[...,None] == orig_edge_index[0])[:,1]
Line too long (88 > 79 characters):
79 dest_subselect = torch.nonzero(idx_nondrop[...,None] == orig_edge_index[1])[:,1]
Line too long (116 > 79 characters):
80 edge_subselect = src_subselect[torch.nonzero(src_subselect[..., None] == dest_subselect)[:,0]].sort().values
Line too long (114 > 79 characters):
139 edges_to_insert = torch.multinomial(torch.ones(permute_num,node_num), 2, replacement=False).t().cuda()
Line too long (104 > 79 characters):
140 insertion_indices = torch.multinomial(torch.ones(edge_num), permute_num, replacement=False)
Line too long (86 > 79 characters):
145 orig_edge_index[:,insertion_indices] = edges_to_insert # modified in place
Line too long (99 > 79 characters):
168 # See Appendix A Algo 1 https://yyou1996.github.io/files/neurips2020_graphcl_supplement.pdf
Line too long (163 > 79 characters):
169 # Code from: https://github.com/Shen-Lab/GraphCL/blob/d857849d51bb168568267e07007c0b0c8bb6d869/transferLearning_MoleculeNet_PPI/bio/pretrain_graphcl.py#L57
Line too long (96 > 79 characters):
175 sim_matrix = torch.einsum('ik,jk->ij', z1, z2) / torch.einsum('i,j->ij', z1_abs, z2_abs)
Line too long (124 > 79 characters):
191 # See: https://github.com/Shen-Lab/GraphCL/blob/e9e598d478d4a4bff94a3e95a078569c028f1d88/unsupervised_TU/aug.py#L203
Line too long (84 > 79 characters):
193 # and since they are not shuffled, the orig and augmented graphs are aligned
Line too long (158 > 79 characters):
194 # See: https://github.com/Shen-Lab/GraphCL/blob/d857849d51bb168568267e07007c0b0c8bb6d869/transferLearning_MoleculeNet_PPI/chem/pretrain_graphcl.py#L80
Line too long (82 > 79 characters):
196 # formulation using einsum and subtracting of the diagonal to separate out
Line too long (82 > 79 characters):
197 # positive pairs from negative pairs, you can compute the contrastive loss
Line too long (83 > 79 characters):
207 # initial intuition is that this might be pretty inefficient since it can't
Line too long (82 > 79 characters):
209 # during training, though, the computations are all torch or native python
Line too long (114 > 79 characters):
248 aug_mask = torch.multinomial(torch.tensor((1-self.aug_prob, self.aug_prob)), batch_size, replacement=True)
Line too long (89 > 79 characters):
300 return self.loss.compute(results['y_pred'], results['y_true'], return_dict=False)
Missing whitespace after ',' (in 2 places):
78 src_subselect = torch.nonzero(idx_nondrop[...,None] == orig_edge_index[0])[:,1]
Missing whitespace after ',' (in 2 places):
79 dest_subselect = torch.nonzero(idx_nondrop[...,None] == orig_edge_index[1])[:,1]
Missing whitespace after ',':
80 edge_subselect = src_subselect[torch.nonzero(src_subselect[..., None] == dest_subselect)[:,0]].sort().values
Missing whitespace after ',':
82 new_edge_index = orig_edge_index[:,edge_subselect]
Missing whitespace after ',':
139 edges_to_insert = torch.multinomial(torch.ones(permute_num,node_num), 2, replacement=False).t().cuda()
Missing whitespace after ',':
145 orig_edge_index[:,insertion_indices] = edges_to_insert # modified in place
Missing whitespace after ',':
235 n = torch.randint(2,(1,)).item()