update 2.3.0
amazingDD committed Aug 14, 2022
1 parent 9083366 commit 7159e0b
Showing 15 changed files with 34 additions and 32 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -3,7 +3,7 @@
</p>

![PyPI - Python Version](https://img.shields.io/badge/pyhton-3.5%2B-blue)
- [![Version](https://img.shields.io/badge/version-2.2.0-orange)](https://github.com/AmazingDD/daisyRec)
+ [![Version](https://img.shields.io/badge/version-2.3.0-orange)](https://github.com/AmazingDD/daisyRec)
![GitHub repo size](https://img.shields.io/github/repo-size/AmazingDD/daisyRec)
![GitHub](https://img.shields.io/github/license/AmazingDD/daisyRec)
[![arXiv](https://img.shields.io/badge/arXiv-daisyRec-%23B21B1B)](https://arxiv.org/abs/2206.10848)
2 changes: 1 addition & 1 deletion daisy/__init__.py
@@ -1 +1 @@
- __version__ = 'v2.2.0'
+ __version__ = 'v2.3.0'
4 changes: 2 additions & 2 deletions daisy/assets/basic.yaml
@@ -4,9 +4,9 @@ reproducibility: True
state: ~

optimization_metric: 'ndcg'
- hyperopt_trail: 20
+ hyperopt_trail: 30
tune_testset: False
- hyperopt_pack: '{}'
+ tune_pack: '{}'

algo_name: 'mf'
val_method: 'tsbr'
2 changes: 1 addition & 1 deletion daisy/model/EASERecommender.py
@@ -47,7 +47,7 @@ def fit(self, train_set):
self.interaction_matrix = X # user_num * item_num

def predict(self, u, i):
- self.interaction_matrix[u, :].multiply(self.item_similarity[:, i].T).sum(axis=1).getA1()
+ return self.interaction_matrix[u, :].multiply(self.item_similarity[:, i].T).sum(axis=1).getA1()[0]

def rank(self, test_loader):
rec_ids = None
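
A note on the EASERecommender change: the old predict computed the score but never returned it, and the row-times-column product yields a 1-element array, so the fix adds the return and unwraps the scalar with [0]. A minimal sketch of the same pattern with toy data (the shapes and names here are assumptions for illustration, not daisyRec internals):

```python
import numpy as np
from scipy.sparse import csr_matrix

# Toy 3-user x 4-item interaction matrix and a stand-in for the learned item-similarity matrix
interactions = csr_matrix(np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 0],
                                    [1, 1, 0, 1]], dtype=float))
item_similarity = np.random.rand(4, 4)

u, i = 0, 2
# (1 x item_num) interaction row times the i-th similarity column, summed -> a 1x1 numpy matrix
score = interactions[u, :].multiply(item_similarity[:, i].T).sum(axis=1)
print(score.getA1()[0])   # .getA1()[0] unwraps it to a plain Python float
```
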
6 changes: 3 additions & 3 deletions daisy/model/FMRecommender.py
@@ -97,7 +97,7 @@ def calc_loss(self, batch):
def predict(self, u, i):
u = torch.tensor(u, device=self.device)
i = torch.tensor(i, device=self.device)
- pred = self.forward(u, i).cpu()
+ pred = self.forward(u, i).cpu().item()

return pred

@@ -122,11 +122,11 @@ def rank(self, test_loader):
return rec_ids.cpu().numpy()

def full_rank(self, u):
- u = torch.tensor(u, self.device)
+ u = torch.tensor(u, device=self.device)

user_emb = self.embed_user(u)
items_emb = self.embed_item.weight
scores = torch.matmul(user_emb, items_emb.transpose(1, 0)) # (item_num,)
- scores += self.u_bias(u) + self.i_bias.weight + self.bias_
+ scores += self.u_bias(u) + self.i_bias.weight.squeeze() + self.bias_

return torch.argsort(scores, descending=True)[:self.topk].cpu().numpy()
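
Two of the FMRecommender fixes are worth spelling out: torch.tensor only takes the data positionally, so torch.tensor(u, self.device) raises a TypeError and the device has to be passed as device=self.device; and self.i_bias.weight has shape (item_num, 1), so adding it to a (item_num,) score vector broadcasts to (item_num, item_num) unless it is squeezed first. A small illustration with assumed sizes:

```python
import torch

item_num = 5
scores = torch.rand(item_num)               # per-item scores, shape (item_num,)
i_bias = torch.nn.Embedding(item_num, 1)    # item bias kept as an (item_num, 1) embedding table

broadcast_bug = scores + i_bias.weight            # (5,) + (5, 1) -> (5, 5), not a score vector
fixed = scores + i_bias.weight.squeeze()          # (5,) + (5,)   -> (5,)
print(broadcast_bug.shape, fixed.shape)

u = 3
u_t = torch.tensor(u, device=torch.device('cpu'))  # device must be a keyword argument;
                                                   # torch.tensor(u, torch.device('cpu')) raises a TypeError
```
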
4 changes: 2 additions & 2 deletions daisy/model/Item2VecRecommender.py
@@ -79,7 +79,7 @@ def predict(self, u, i):
item_emb = self.shared_embedding(i)
pred = (user_emb * item_emb).sum(dim=-1)

- return pred.cpu()
+ return pred.cpu().item()

def rank(self, test_loader):
rec_ids = torch.tensor([], device=self.device)
@@ -101,7 +101,7 @@ def rank(self, test_loader):
return rec_ids.cpu().numpy()

def full_rank(self, u):
- u = torch.tensor(u, self.device)
+ u = torch.tensor(u, device=self.device)

user_emb = self.user_embedding(u)
items_emb = self.shared_embedding.weight
2 changes: 1 addition & 1 deletion daisy/model/LightGCNRecommender.py
@@ -176,7 +176,7 @@ def predict(self, u, i):
i_embedding = self.restore_item_e[i]
pred = torch.matmul(u_embedding, i_embedding.t())

- return pred.cpu()
+ return pred.cpu().item()

def rank(self, test_loader):
if self.restore_user_e is None or self.restore_item_e is None:
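
The recurring predict() change in this commit swaps .cpu() for .cpu().item(), so the score for a single (u, i) pair comes back as a plain Python float instead of a tensor. The difference in a nutshell (the example value is made up):

```python
import torch

pred = torch.tensor([0.73])   # what a forward pass might return for one (u, i) pair
print(pred.cpu())             # tensor([0.7300])  - still a tensor object
print(pred.cpu().item())      # 0.73              - a plain Python float
```
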
4 changes: 2 additions & 2 deletions daisy/model/MFRecommender.py
@@ -99,7 +99,7 @@ def calc_loss(self, batch):
def predict(self, u, i):
u = torch.tensor(u, device=self.device)
i = torch.tensor(i, device=self.device)
- pred = self.forward(u, i).cpu()
+ pred = self.forward(u, i).cpu().item()

return pred

@@ -124,7 +124,7 @@ def rank(self, test_loader):


def full_rank(self, u):
- u = torch.tensor(u, self.device)
+ u = torch.tensor(u, device=self.device)

user_emb = self.embed_user(u)
items_emb = self.embed_item.weight
6 changes: 3 additions & 3 deletions daisy/model/NFMRecommender.py
@@ -151,7 +151,7 @@ def calc_loss(self, batch):
def predict(self, u, i):
u = torch.tensor(u, device=self.device)
i = torch.tensor(i, device=self.device)
- pred = self.forward(u, i).cpu()
+ pred = self.forward(u, i).cpu().item()

return pred

@@ -192,7 +192,7 @@ def rank(self, test_loader):
return rec_ids.cpu().numpy()

def full_rank(self, u):
- u = torch.tensor(u, self.device)
+ u = torch.tensor(u, device=self.device)

user_emb = self.embed_user(u) # factor
items_emb = self.embed_item.weight # item_num * factor
@@ -202,7 +202,7 @@ def full_rank(self, u):
if self.num_layers:
fm = self.deep_layers(fm) # item_num * factor
fm += self.u_bias(u) + self.i_bias.weight + self.bias_
- scores = self.prediction(fm) # item_num
+ scores = self.prediction(fm).squeeze() # item_num

return torch.argsort(scores, descending=True)[:self.topk].cpu().numpy()

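
On the NFM full_rank change: self.prediction maps each item's factor vector to one score, so its output has shape (item_num, 1); without the squeeze, torch.argsort sorts along that trailing size-1 dimension and the ranking is meaningless. A compact check with assumed sizes:

```python
import torch

item_num, factor = 6, 8
fm = torch.rand(item_num, factor)                     # per-item representations
prediction = torch.nn.Linear(factor, 1, bias=False)   # stand-in for the model's prediction layer

raw = prediction(fm)                                  # shape (item_num, 1)
bad = torch.argsort(raw, descending=True)             # sorts the size-1 last dim: all zeros
good = torch.argsort(raw.squeeze(), descending=True)  # shape (item_num,): a real item ranking
print(bad.shape, good.shape)                          # torch.Size([6, 1]) torch.Size([6])
```
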
2 changes: 1 addition & 1 deletion daisy/model/NGCFRecommender.py
@@ -217,7 +217,7 @@ def predict(self, u, i):
i_embedding = self.restore_item_e[i]
pred = torch.matmul(u_embedding, i_embedding.t())

- return pred.cpu()
+ return pred.cpu().item()

def rank(self, test_loader):
if self.restore_user_e is None or self.restore_item_e is None:
6 changes: 3 additions & 3 deletions daisy/model/NeuMFRecommender.py
@@ -171,7 +171,7 @@ def calc_loss(self, batch):
def predict(self, u, i):
u = torch.tensor(u, device=self.device)
i = torch.tensor(i, device=self.device)
- pred = self.forward(u, i).cpu()
+ pred = self.forward(u, i).cpu().item()

return pred

@@ -206,10 +206,10 @@ def rank(self, test_loader):

rec_ids = torch.cat((rec_ids, rank_list), 0)

- return rec_ids
+ return rec_ids.cpu().numpy()

def full_rank(self, u):
- u = torch.tensor(u, self.device)
+ u = torch.tensor(u, device=self.device)

if not self.model == 'MLP':
embed_user_GMF = self.embed_user_GMF(u) # factor
11 changes: 6 additions & 5 deletions daisy/model/VAECFRecommender.py
@@ -7,6 +7,7 @@
year={2018}
}
'''
+ import math
import torch
import torch.nn as nn
import torch.nn.functional as F
@@ -81,7 +82,7 @@ def forward(self, rating_matrix):
h = self.encoder(h)

mu = h[:, :int(self.lat_dim / 2)]
- logvar = h[:, int(self.lat_dim / 2):]
+ logvar = h[:, math.ceil(self.lat_dim / 2):]

z = self.reparameterize(mu, logvar)
z = self.decoder(z)
@@ -109,13 +110,13 @@ def calc_loss(self, batch):
return loss

def predict(self, u, i):
- u = torch.tensor(u, device=self.device)
- i = torch.tensor(i, device=self.device)
+ u = torch.tensor([u], device=self.device)
+ i = torch.tensor([i], device=self.device)

rating_matrix = self.get_user_rating_matrix(u)
scores, _, _ = self.forward(rating_matrix)

- return scores[[torch.arange(len(i)).to(self.device), i]].cpu()
+ return scores[[torch.arange(len(i)).to(self.device), i]].cpu().item()

def rank(self, test_loader):
rec_ids = torch.tensor([], device=self.device)
@@ -137,7 +138,7 @@ def rank(self, test_loader):
return rec_ids.cpu().numpy()

def full_rank(self, u):
- u = torch.tensor(u, device=self.device)
+ u = torch.tensor([u], device=self.device)
rating_matrix = self.get_user_rating_matrix(u)
scores, _, _ = self.forward(rating_matrix)

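
Two separate fixes in VAECFRecommender: with an odd lat_dim, slicing logvar from int(lat_dim / 2) gives it one more column than mu, so math.ceil keeps the two halves the same width; and wrapping the raw ids as torch.tensor([u]) keeps them 1-D, which the len(i)-based indexing in predict needs. A short illustration (the odd width is chosen only to expose the issue):

```python
import math
import torch

lat_dim = 7
h = torch.rand(2, lat_dim)                     # pretend encoder output for 2 users

mu = h[:, :int(lat_dim / 2)]                   # columns 0..2 -> width 3
logvar_old = h[:, int(lat_dim / 2):]           # columns 3..6 -> width 4, mismatched with mu
logvar_new = h[:, math.ceil(lat_dim / 2):]     # columns 4..6 -> width 3, matches mu
print(mu.shape, logvar_old.shape, logvar_new.shape)

i = torch.tensor([5])                          # 1-D, so len(i) == 1 works;
print(len(i))                                  # len() on a 0-d tensor would raise a TypeError
```
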
3 changes: 1 addition & 2 deletions daisy/utils/parser.py
@@ -9,9 +9,8 @@ def parse_args():
help='the metric to be optimized for hyper-parameter tuning via HyperOpt')
parser.add_argument('--hyperopt_trail',
type=int,
default=30,
help='the number of trails of HyperOpt')
- parser.add_argument('--hyperopt_pack',
+ parser.add_argument('--tune_pack',
type=str,
help='record the searching space of hyper-parameters for HyperOpt')
# common settings
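
The renamed --tune_pack flag carries the hyper-parameter search space as a JSON string. Judging from the 'min'/'max'/'step' lookups in run_examples/tune.py below, a pack plausibly looks like the following; the parameter names and exact schema here are assumptions, not documented daisyRec behaviour:

```python
import json

# Hypothetical search-space pack; keys mirror the param_dict[param]['min'/'max'/'step']
# accesses in tune.py, while 'factors' and 'lr' are only example parameter names.
tune_pack = json.dumps({
    "factors": {"min": 16, "max": 128, "step": 16},
    "lr": {"min": 1e-4, "max": 1e-2, "step": 1e-4},
})

param_dict = json.loads(tune_pack)
print(param_dict["factors"]["max"])   # 128
```
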
8 changes: 5 additions & 3 deletions run_examples/tune.py
@@ -1,3 +1,4 @@
+ from ast import Global
import json
import optuna
import numpy as np
@@ -102,7 +103,7 @@
config['logger'] = logger

''' unpack hyperparameters to tune '''
- param_dict = json.loads(config['hyperopt_pack'])
+ param_dict = json.loads(config['tune_pack'])
algo_name = config['algo_name']
kpi_name = config['optimization_metric']
tune_param_names = tune_params_config[algo_name]
@@ -133,6 +134,7 @@

''' define optimization target function '''
def objective(trial):
+ global TRIAL_CNT
for param in tune_param_names:
if param not in param_dict.keys(): continue

@@ -145,7 +147,7 @@ def objective(trial):
param, param_dict[param]['min'], param_dict[param]['max'], 1 if step is None else step)
elif param_type_config[param] == 'float':
config[param] = trial.suggest_float(
- param, param_dict[param]['min'], param_dict[param]['max'], param_dict[param]['step'])
+ param, param_dict[param]['min'], param_dict[param]['max'], step=param_dict[param]['step'])
else:
raise ValueError(f'Invalid parameter type for {param}...')
else:
@@ -224,7 +226,7 @@ def objective(trial):

''' record the best choices '''
logger.info(f'Trial {study.best_trial.number} get the best {kpi_name}({study.best_trial.value}) with params: {study.best_trial.params}')
- line = ','.join([study.best_params[param] for param in tune_param_names]) + f',{study.best_value:.4f}\n'
+ line = ','.join([str(study.best_params[param]) for param in tune_param_names]) + f',{study.best_value:.4f}\n'
f.write(line)
f.flush()
f.close()
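
The last two tune.py fixes are Optuna and CSV details: trial.suggest_float(name, low, high, ...) only accepts the step as a keyword argument, and str.join refuses non-string values, so the best parameters are stringified before being written out. A self-contained sketch of both (the toy objective is illustrative):

```python
import optuna

def objective(trial):
    # step must be passed by keyword; a positional fourth argument is rejected
    lr = trial.suggest_float('lr', 1e-4, 1e-2, step=1e-4)
    return (lr - 5e-3) ** 2

study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=5)

# ','.join() needs strings, hence the str(...) wrapper around each best parameter
line = ','.join([str(v) for v in study.best_params.values()]) + f',{study.best_value:.4f}\n'
print(line)
```
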
4 changes: 2 additions & 2 deletions setup.py
@@ -44,14 +44,14 @@
# package_dir={"": "daisy"},
package_data={"": ["*.yaml"]},
# packages = find_packages(exclude=['tests*']),
- version='v2.2.0', # Ideally should be same as your GitHub release tag varsion
+ version='v2.3.0', # Ideally should be same as your GitHub release tag varsion
description=('An easy-to-use library for recommender systems.'),
long_description=long_description,
# long_description_content_type="text/markdown",
author='Yu Di',
author_email='[email protected]',
url='https://github.com/AmazingDD/daisyRec',
- download_url='https://github.com/AmazingDD/daisyRec/archive/refs/tags/v2.2.0.tar.gz',
+ download_url='https://github.com/AmazingDD/daisyRec/archive/refs/tags/v2.3.0.tar.gz',
keywords=['ranking', 'recommendation'],
# include_package_data=True,
install_requires=install_requires,
