'sklearn.preprocessing.KBinsDiscretizer' imported but unused:
3 from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, MinMaxScaler, StandardScaler

'sklearn.preprocessing.MinMaxScaler' imported but unused:
3 from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, MinMaxScaler, StandardScaler

'sklearn.preprocessing.OneHotEncoder' imported but unused:
3 from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, MinMaxScaler, StandardScaler

'sklearn.preprocessing.StandardScaler' imported but unused:
3 from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, MinMaxScaler, StandardScaler

Line too long (95 > 79 characters):
3 from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, MinMaxScaler, StandardScaler

Line too long (89 > 79 characters):
10 Encodes categorical variables such as structural identifiers and degree features.

Line too long (88 > 79 characters):
44 uniques, corrs = np.unique(tensor_list[:, col], return_inverse=True, axis=0)

Line too long (94 > 79 characters):
55 translated = torch.LongTensor(self.corrs[col][pointer:pointer+n]).unsqueeze(1)

Line too long (88 > 79 characters):
56 encoded = torch.cat((encoded, translated), 1) if col > 0 else translated

Line too long (85 > 79 characters):
66 self.d = [int(tensor_list[:,i].max()+1) for i in range(tensor_list.shape[1])]

Line too long (103 > 79 characters):
72 # NB: this encoding scheme has been implemented, but never tested in experiments: use at your own risk.

Line too long (81 > 79 characters):
78 range_scaler = [0.0, 1.0] if kwargs['range'] is None else kwargs['range']

Line too long (103 > 79 characters):
100 # NB: this encoding scheme has been implemented, but never tested in experiments: use at your own risk.

Line too long (94 > 79 characters):
124 encoder = KBinsDiscretizer(n_bins=B, encode='ordinal', strategy=self.strategy)

Line too long (83 > 79 characters):
129 translated = result if col == 0 else torch.cat((translated, result), 1)