
Commit

add
HuichuanLI committed Sep 11, 2021
1 parent eca30bc commit c42c6b7
Showing 5 changed files with 60 additions and 5 deletions.
Binary file added gcn/__pycache__/gcn.cpython-36.pyc
Binary file not shown.
Binary file added gcn/__pycache__/gcn.cpython-39.pyc
Binary file not shown.
Binary file added gcn/__pycache__/utils.cpython-36.pyc
Binary file not shown.
12 changes: 7 additions & 5 deletions gcn/gcn.py
@@ -53,13 +53,11 @@ def build(self, input_shapes):
     def call(self, inputs, training=None, **kwargs):
         features, A = inputs
         features = self.dropout(features, training=training)
-        output = tf.matmul(tf.sparse_tensor_dense_matmul(
-            A, features), self.kernel)
-        if self.bias:
+        output = tf.matmul(tf.matmul(A, features), self.kernel)
+        if self.use_bias:
             output += self.bias
         act = self.activation(output)

-        act._uses_learning_phase = features._uses_learning_phase
         return act

     def get_config(self):
@@ -78,7 +76,7 @@ def get_config(self):

 def GCN(adj_dim, feature_dim, n_hidden, num_class, num_layers=2, activation=tf.nn.relu, dropout_rate=0.5, l2_reg=0,
         feature_less=True, ):
-    Adj = Input(shape=(None,), sparse=True)
+    Adj = Input(shape=(None,), sparse=False)
     if feature_less:
         X_in = Input(shape=(1,), )

@@ -100,4 +98,8 @@ def GCN(adj_dim, feature_dim, n_hidden, num_class, num_layers=2, activation=tf.n
     output = h
     model = Model(inputs=[X_in, Adj], outputs=output)

+    model.__setattr__("embedding", output)
+    model.__setattr__("adj_input", Adj)
+    model.__setattr__("feature_input", X_in)
+
     return model
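
Note on the change above: the layer now computes the propagation with a plain dense tf.matmul instead of the TF1-style tf.sparse_tensor_dense_matmul, which matches the Adj input switching from sparse=True to sparse=False. A minimal standalone sketch of the two equivalent spellings, assuming TensorFlow 2 and toy tensors (not code from this repository):

import tensorflow as tf

# Toy (already-normalized) adjacency, node features, and layer kernel.
A_dense = tf.constant([[0.5, 0.5, 0.0],
                       [0.5, 0.5, 0.0],
                       [0.0, 0.0, 1.0]], dtype=tf.float32)
X = tf.random.normal((3, 4))   # node features
W = tf.random.normal((4, 2))   # layer weights

# Dense path, as in the updated layer: (A @ X) @ W
out_dense = tf.matmul(tf.matmul(A_dense, X), W)

# Sparse path, the TF2 spelling of the sparse matmul that was removed
A_sparse = tf.sparse.from_dense(A_dense)
out_sparse = tf.matmul(tf.sparse.sparse_dense_matmul(A_sparse, X), W)

# Both paths agree up to floating-point error
print(tf.reduce_max(tf.abs(out_dense - out_sparse)).numpy())
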
53 changes: 53 additions & 0 deletions gcn/main.py
@@ -0,0 +1,53 @@
# -*- coding:utf-8 -*-
# @Time : 2021/9/11 10:32 PM
# @Author : huichuan LI
# @File : main.py
# @Software: PyCharm

import scipy.sparse as sp
import numpy as np
from gcn import GCN
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from utils import load_data
import tensorflow

if __name__ == "__main__":
    # Read data

    FEATURE_LESS = False

    adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(path="../Graph/data/cora/")

    if FEATURE_LESS:
        X = np.arange(adj.shape[-1])
        feature_dim = adj.shape[-1]
    else:
        X = features
        feature_dim = X.shape[-1]

    model_input = [X.toarray(), adj.toarray()]

    model = GCN(adj.shape[-1], feature_dim, 16, y_train.shape[1], dropout_rate=0.5, l2_reg=2.5e-4,
                feature_less=FEATURE_LESS, )

    model.compile(optimizer=Adam(0.01), loss='categorical_crossentropy',
                  weighted_metrics=['categorical_crossentropy', 'acc', tensorflow.keras.metrics.AUC(name='auc'),
                                    ])

    NB_EPOCH = 200
    PATIENCE = 200  # early stopping patience

    val_data = (model_input, y_val, val_mask)
    # train
    print("start training")
    model.fit(model_input, y_train, sample_weight=train_mask, validation_data=val_data,
              batch_size=adj.shape[0], epochs=NB_EPOCH, shuffle=False, verbose=2)

    model_input = [adj.toarray(), features.toarray()]
    user_embedding_model = Model(inputs=[model.adj_input, model.feature_input], outputs=model.embedding)

    user_embs = user_embedding_model.predict(model_input, batch_size=adj.shape[0])
    print(user_embs.shape)
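
A possible follow-up to the script above (not part of this commit, and assuming the variables from main.py are still in scope): scoring the held-out nodes with the same masking convention used for training. Note that model expects its inputs in [features, adjacency] order, unlike user_embedding_model.

# Hypothetical test-set evaluation sketch
eval_input = [features.toarray(), adj.toarray()]
eval_results = model.evaluate(eval_input, y_test, sample_weight=test_mask,
                              batch_size=adj.shape[0], verbose=0)
print(dict(zip(model.metrics_names, eval_results)))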
