Undefined name 'embedding':
161 embedding = torch.cat((embedding, embedding_i),1) if i>0 else embedding_i

Undefined name 'onehot':
185 onehot = torch.cat((onehot, onehot_i), 1) if i>0 else onehot_i
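Both undefined-name hits come from the `acc = f(acc, part) if i > 0 else part` pattern: the accumulator is only assigned inside the loop, so the checker never sees a definition before the first read. Pre-assigning the name before the loop (e.g. `embedding = None`) would typically silence the warning; a cleaner option is to collect the per-feature tensors and concatenate once after the loop. A minimal sketch of the latter, where `multi_embed_concat`, `embedding_list` and `x` are stand-ins for the module's real names, not the repository's actual code:

    import torch

    def multi_embed_concat(embedding_list, x):
        # Embed each categorical column of the long tensor `x` separately.
        parts = [emb(x[:, i]) for i, emb in enumerate(embedding_list)]
        # A single concatenation after the loop: no name is read before it
        # is assigned, so the undefined-name warning no longer fires.
        return torch.cat(parts, dim=1)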
Line too long (94 > 79 characters):
19 raise NotImplementedError('Reduction {} not currently implemented.'.format(reduction))

Line too long (95 > 79 characters):
26 index = torch.stack([batch, torch.tensor(list(range(batch.shape[0])), device=x.device)], 0)

Line too long (107 > 79 characters):
27 x_sparse = torch.sparse.FloatTensor(index, x, torch.Size([torch.max(batch)+1, x.shape[0], x.shape[1]]))

Line too long (95 > 79 characters):
35 index = torch.stack([batch, torch.tensor(list(range(batch.shape[0])), device=x.device)], 0)

Line too long (107 > 79 characters):
36 x_sparse = torch.sparse.FloatTensor(index, x, torch.Size([torch.max(batch)+1, x.shape[0], x.shape[1]]))

Line too long (91 > 79 characters):
46 def __init__(self, encoder_name, d_in_features, d_in_encoder, d_out_encoder, **kwargs):

Line too long (99 > 79 characters):
55 # d_in_encoder: number of unique values that will be encoded (size of embedding vocabulary)

Line too long (107 > 79 characters):
82 #-------------- embedding of categorical data (linear projection without bias of one hot encodings)

Line too long (103 > 79 characters):
84 self.encoder = multi_embedding(d_in_encoder, d_out_encoder, kwargs['aggr'], kwargs['init'])

Line too long (131 > 79 characters):
92 full_atom_feature_dims = get_atom_feature_dims() if kwargs['features_scope'] == 'full' else get_atom_feature_dims()[:2]

Line too long (132 > 79 characters):
98 full_bond_feature_dims = get_bond_feature_dims() if kwargs['features_scope'] == 'full' else get_bond_feature_dims()[:2]

Line too long (100 > 79 characters):
118 raise NotImplementedError('Encoder {} is not currently supported.'.format(encoder_name))

Line too long (103 > 79 characters):
128 x = x.float() if self.encoder_name == 'linear' or self.encoder_name == 'mlp' else x.long()

Line too long (123 > 79 characters):
140 #-------------- embedding of multiple categorical features. Summation or concatenation of the embeddings is allowed

Line too long (89 > 79 characters):
161 embedding = torch.cat((embedding, embedding_i),1) if i>0 else embedding_i

Line too long (121 > 79 characters):
165 raise NotImplementedError('multi embedding aggregation {} is not currently supported.'.format(self.aggr))

Line too long (89 > 79 characters):
183 onehot_i = torch.zeros((tensor.shape[0], self.d_in[i]), device=tensor.device)

Line too long (92 > 79 characters):
218 #-------------- This is a way to create a dummy variable that represents self loops.

Line too long (119 > 79 characters):
220 #-------------- Two ways are allowed: extra dummy variable (one hot or embedding) or a vector filled with zeros

Line too long (86 > 79 characters):
230 self.encoder = DiscreteEmbedding('one_hot_encoder', 1, [d_ef+1], None)

Line too long (91 > 79 characters):
237 self.encoder = DiscreteEmbedding('embedding', None, [1], d_ef, aggr='sum')

Line too long (84 > 79 characters):
247 zero_extension = torch.zeros((x_nb.shape[0], 1), device=x_nb.device)

Line too long (81 > 79 characters):
249 x_central = torch.zeros((num_nodes,1), device=x_nb.device).long()

Line too long (84 > 79 characters):
252 x_central = torch.zeros((num_nodes, self.d_out), device=x_nb.device)

Line too long (81 > 79 characters):
255 x_central = torch.zeros((num_nodes,1), device=x_nb.device).long()

Line too long (84 > 79 characters):
258 x_central = torch.zeros((num_nodes, self.d_out), device=x_nb.device)
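Most of these can be cleared by breaking the statement inside the parentheses it already opens, or by introducing a short intermediate variable. As one possible re-wrapping of the widest hits (the sparse-tensor construction on lines 27 and 36); the wrapper function name is invented for the example, and `index`, `x`, `batch` are assumed to be the tensors built in the quoted code:

    import torch

    def to_sparse_batch(index, x, batch):
        # Same call as in the flagged lines, split so each physical line
        # stays within the 79-character limit.
        size = torch.Size([torch.max(batch) + 1, x.shape[0], x.shape[1]])
        return torch.sparse.FloatTensor(index, x, size)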
Unexpected spaces around keyword / parameter equals (in 2 places):
109 self.encoder = BondEncoder(emb_dim = d_out_encoder)

Unexpected spaces around keyword / parameter equals (in 2 places):
136 def __init__(self, d_in, d_out, aggr = 'concat', init=None):
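Per PEP 8 (E251), `=` takes no surrounding spaces when it binds a keyword argument or a default parameter value, so the flagged lines would become `BondEncoder(emb_dim=d_out_encoder)` and `def __init__(self, d_in, d_out, aggr='concat', init=None):`. A minimal, self-contained illustration of the rule (the names below are made up for the example):

    def scale(x, factor=2):       # default value: no spaces around '='
        return x * factor

    result = scale(10, factor=3)  # keyword argument: no spaces around '='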