import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp


def parse_index_file(filename):
    """Parse an index file containing one integer node index per line."""
    index = []
    for line in open(filename):
        index.append(int(line.strip()))
    return index


# Documentation: https://github.com/kimiyoung/planetoid
# x: the feature vectors of the labeled training instances
# tx: the feature vectors of the test instances
# allx: the feature vectors of both labeled and unlabeled training instances (a superset of x)
# graph: a dict in the format {index: [index_of_neighbor_nodes]}
def load_data(dataset):
    # Load the pickled Planetoid objects: x, tx, allx, graph
    names = ['x', 'tx', 'allx', 'graph']
    objects = []
    for name in names:
        with open("data/ind.{}.{}".format(dataset, name), 'rb') as f:
            # encoding='latin1' is required to unpickle Python 2 numpy arrays under Python 3
            objects.append(pkl.load(f, encoding='latin1'))
    x, tx, allx, graph = tuple(objects)

    # Test instances are stored in a shuffled order; recover their original indices
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended

    # Stack training and test features, then reorder the test rows to match the graph indexing
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]

    # Build the sparse adjacency matrix from the neighbor-list dict
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    return adj, features
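

if __name__ == '__main__':
    # Minimal usage sketch: assumes the Planetoid files (e.g. data/ind.cora.x,
    # data/ind.cora.tx, ...) are present under data/; 'cora' is used here purely
    # as an illustrative dataset name, not something required by this module.
    adj, features = load_data('cora')
    print("Adjacency matrix shape:", adj.shape)
    print("Feature matrix shape:", features.shape)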