rewrite to Keras functional API
orbxball committed May 31, 2017
1 parent 4b270b0 commit 3725e3e
Showing 3 changed files with 80 additions and 17 deletions.
41 changes: 41 additions & 0 deletions hw6/Model.py
@@ -0,0 +1,41 @@
import numpy as np
from keras.layers import Input, Embedding, Reshape, Dense, Dropout
from keras.layers.merge import concatenate, dot
from keras.models import Model

def build_cf_model(n_users, n_movies, dim):
    u_input = Input(shape=(1,))
    u = Embedding(n_users, dim)(u_input)
    u = Reshape((dim,))(u)

    m_input = Input(shape=(1,))
    m = Embedding(n_movies, dim)(m_input)
    m = Reshape((dim,))(m)

    out = dot([u, m], -1)

    model = Model(inputs=[u_input, m_input], outputs=out)
    return model


def build_deep_model(n_users, n_movies, dim, dropout=0.1):
    u_input = Input(shape=(1,))
    u = Embedding(n_users, dim)(u_input)
    u = Reshape((dim,))(u)

    m_input = Input(shape=(1,))
    m = Embedding(n_movies, dim)(m_input)
    m = Reshape((dim,))(m)

    out = concatenate([u, m])
    out = Dropout(dropout)(out)
    out = Dense(dim, activation='relu')(out)
    out = Dropout(dropout)(out)
    out = Dense(1, activation='linear')(out)

    model = Model(inputs=[u_input, m_input], outputs=out)
    return model

def rate(model, user_id, item_id):
    return model.predict([np.array([user_id]), np.array([item_id])])[0][0]
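
A minimal usage sketch of the builders above (the user/movie counts, ids, and ratings here are made up; train.py and test.py below derive the real values from the ratings CSV and model/max.csv):

import numpy as np
from Model import build_deep_model, rate

n_users, n_movies, dim = 6040, 3952, 120       # hypothetical sizes
model = build_deep_model(n_users, n_movies, dim)
model.compile(loss='mse', optimizer='adamax')  # same settings train.py uses

# toy training data: 0-based embedding ids and their ratings
users = np.array([0, 1, 2])
movies = np.array([10, 20, 30])
ratings = np.array([3.0, 4.0, 5.0])
model.fit([users, movies], ratings, epochs=1, verbose=0)

print(rate(model, 0, 10))                      # predicted rating for user 0, movie 10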

35 changes: 21 additions & 14 deletions hw6/test.py
@@ -3,17 +3,18 @@
import argparse
import numpy as np
import pandas as pd
from CFModel import CFModel, DeepModel
from Model import build_cf_model, build_deep_model, rate


def parse_args():
    parser = argparse.ArgumentParser(description='HW6: Matrix Factorization')
    parser.add_argument('train', type=str)
    parser.add_argument('data_dir', type=str)
    parser.add_argument('output', type=str)
    return parser.parse_args()


def predict_rating(trained_model, userid, movieid):
    return trained_model.rate(userid - 1, movieid - 1)
    return rate(trained_model, userid - 1, movieid - 1)


def ensure_dir(file_path):
@@ -24,11 +25,6 @@ def ensure_dir(file_path):


def main(args):
    ratings = pd.read_csv(args.train, usecols=['UserID', 'MovieID', 'Rating'])
    max_userid = ratings['UserID'].drop_duplicates().max()
    max_movieid = ratings['MovieID'].drop_duplicates().max()
    print('{} ratings loaded.'.format(ratings.shape[0]))

    users = pd.read_csv(USERS_CSV, sep='::', engine='python',
                        usecols=['UserID', 'Gender', 'Age', 'Occupation', 'Zip-code'])
    print('{} descriptions of {} users loaded'.format(len(users), max_userid))
@@ -40,7 +36,7 @@ def main(args):
    test_data = pd.read_csv(TEST_CSV, usecols=['UserID', 'MovieID'])
    print('{} testing data loaded.'.format(test_data.shape[0]))

    trained_model = DeepModel(max_userid, max_movieid, DIM)
    trained_model = build_deep_model(max_userid, max_movieid, DIM)
    print('Loading model weights...')
    trained_model.load_weights(MODEL_WEIGHTS_FILE)
    print('Loading model done!!!')
@@ -56,12 +52,23 @@ def main(args):
if __name__ == '__main__':
    args = parse_args()

    TEST_CSV = 'data/test.csv'
    USERS_CSV = 'data/users.csv'
    MOVIES_CSV = 'data/movies.csv'
    MODEL_DIR = './model'
    MAX_CSV = 'max.csv'
    TEST_CSV = 'test.csv'
    USERS_CSV = 'users.csv'
    MOVIES_CSV = 'movies.csv'
    MODEL_WEIGHTS_FILE = 'weights.h5'

    DIM = 120
    TEST_USER = 3000
    DATA_DIR = args.data_dir
    TEST_CSV = os.path.join(DATA_DIR, TEST_CSV)
    USERS_CSV = os.path.join(DATA_DIR, USERS_CSV)
    MOVIES_CSV = os.path.join(DATA_DIR, MOVIES_CSV)

    MODEL_WEIGHTS_FILE = os.path.join(MODEL_DIR, MODEL_WEIGHTS_FILE)
    MAX_CSV = os.path.join(MODEL_DIR, MAX_CSV)
    info = pd.read_csv(MAX_CSV)
    DIM = list(info['dim'])[0]
    max_userid = list(info['max_userid'])[0]
    max_movieid = list(info['max_movieid'])[0]

    main(args)
21 changes: 18 additions & 3 deletions hw6/train.py
@@ -4,12 +4,14 @@
import numpy as np
import pandas as pd
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from CFModel import CFModel, DeepModel
from Model import build_cf_model, build_deep_model, rate


def parse_args():
    parser = argparse.ArgumentParser(description='HW6: Matrix Factorization')
    parser.add_argument('train', type=str)
    parser.add_argument('test', type=str)
    parser.add_argument('--dim', type=int, default=120)
    return parser.parse_args()


@@ -22,6 +24,13 @@ def main(args):
    ratings['Movie_emb_id'] = ratings['MovieID'] - 1
    print('{} ratings loaded.'.format(ratings.shape[0]))

    maximum = {}
    maximum['max_userid'] = [max_userid]
    maximum['max_movieid'] = [max_movieid]
    maximum['dim'] = [DIM]
    pd.DataFrame(data=maximum).to_csv(MAX_FILE, index=False)
    print('max info saved to {}'.format(MAX_FILE))

    ratings = ratings.sample(frac=1)
    Users = ratings['User_emb_id'].values
    print('Users: {}, shape = {}'.format(Users, Users.shape))
@@ -30,7 +39,7 @@ def main(args):
    Ratings = ratings['Rating'].values
    print('Ratings: {}, shape = {}'.format(Ratings, Ratings.shape))

    model = DeepModel(max_userid, max_movieid, DIM)
    model = build_deep_model(max_userid, max_movieid, DIM)
    model.compile(loss='mse', optimizer='adamax')

    callbacks = [EarlyStopping('val_loss', patience=2),
@@ -41,7 +50,13 @@ def main(args):
if __name__ == '__main__':
    args = parse_args()

    DIM = 120
    MODEL_DIR = './model'
    DIM = args.dim
    MODEL_WEIGHTS_FILE = 'weights.h5'
    MAX_FILE = 'max.csv'

    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    MODEL_WEIGHTS_FILE = os.path.join(MODEL_DIR, MODEL_WEIGHTS_FILE)
    MAX_FILE = os.path.join(MODEL_DIR, MAX_FILE)
    main(args)
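
Since the rebuilt models take max_userid, max_movieid and dim as plain arguments, train.py and test.py now agree on them through model/max.csv instead of re-scanning the ratings file at test time. A small round-trip sketch with made-up values (the real ones come from the training CSV and --dim):

import os
import pandas as pd

MODEL_DIR = './model'
MAX_CSV = os.path.join(MODEL_DIR, 'max.csv')
if not os.path.exists(MODEL_DIR):
    os.makedirs(MODEL_DIR)

# what train.py writes after scanning the ratings (values here are hypothetical)
maximum = {'max_userid': [6040], 'max_movieid': [3952], 'dim': [120]}
pd.DataFrame(data=maximum).to_csv(MAX_CSV, index=False)

# what test.py reads back before rebuilding the model and loading weights.h5
info = pd.read_csv(MAX_CSV)
DIM = list(info['dim'])[0]
max_userid = list(info['max_userid'])[0]
max_movieid = list(info['max_movieid'])[0]
print(DIM, max_userid, max_movieid)   # 120 6040 3952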
