CategoryCluster.py
# coding: utf-8
# In[612]:
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
import pickle
import pymysql
import json
config_fn = './config.json'
print("Import Complete")
# In[613]:
def save_obj(obj, name):
    with open(name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)


def load_obj(name):
    with open(name + '.pkl', 'rb') as f:
        return pickle.load(f)
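

# Usage sketch for the pickle helpers above: they round-trip any picklable
# object to <name>.pkl in the working directory, e.g.
#     save_obj(model, 'clusterer_Compensation')   # writes ./clusterer_Compensation.pkl
#     model = load_obj('clusterer_Compensation')  # reads it back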
# In[614]:
def connect(config):
    return pymysql.connect(
        host=config['ai_db_host'],        # Database host
        port=config['ai_db_port'],        # Database port
        user=config['ai_db_username'],    # Database user
        passwd=config['ai_db_password'],  # Database password
        db=config['ai_db_name'],          # Database name
        connect_timeout=5,
        cursorclass=pymysql.cursors.DictCursor
    )


def pull_data():
    with open(config_fn, "r") as f:
        config = json.load(f)
    conn = connect(config)
    sql_1 = "SELECT rowId, question, category FROM cleanHotlineQuestionAnswer;"
    with conn.cursor() as cursor:
        cursor.execute(sql_1)
        result = cursor.fetchall()
    conn.close()
    return result
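

# A minimal sketch of the config.json this script expects, inferred from the
# keys read in connect() above; the values here are placeholders, not real
# settings:
#
#     {
#         "ai_db_host": "localhost",
#         "ai_db_port": 3306,
#         "ai_db_username": "user",
#         "ai_db_password": "secret",
#         "ai_db_name": "hotline"
#     }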
# In[615]:
def cluster(df, df2, N, name, v=False):
    clusterer = KMeans(n_clusters=N)
    clusterer.fit(df2)
    save_obj(clusterer, 'clusterer_' + name)
    # KMeans.transform gives each sample's distance to every cluster center;
    # the nearest center is the assignment, and its squared distance is kept
    # as a tightness measure.
    transform = clusterer.transform(df2)
    d_center = []
    cluster = []
    for x in transform:
        d_center.append(min(x) ** 2)
        cluster.append(np.argmin(x))
    df['cluster'] = cluster
    df['d_from_center'] = d_center
    d_center = np.array(d_center)
    mean = np.mean(d_center)
    std = np.std(d_center)
    if v:
        print("Mean: {}".format(round(mean, 3)))
        print("STD: {}".format(round(std, 3)))
        print("")
        for cgroup in range(N):
            group = df.groupby('cluster').get_group(cgroup)
            print_clusters(group)
    return df


def print_clusters(group):
    std = np.std(list(group.d_from_center))
    print("Found {} messages of the same form. STD: {}".format(len(group), std))
    # Only show sample questions when the cluster has more than one member.
    if group.question.count() > 1:
        for message in group.question.head(5):
            print(message)
        print("")
    print("")
# In[616]:
def print_to_tsv(df, X, cat_name):
    # Document vectors, one tab-separated row per question.
    vector_doc = 'doc_vectors_' + cat_name + '.tsv'
    count = 0
    with open(vector_doc, 'w') as w:
        for question in X:
            w.write("\t".join(str(v) for v in question) + "\n")
            count += 1
    print("Wrote file {} with {} entries".format(vector_doc, count))
    # Matching metadata: the cluster assignment and original question text.
    meta_doc = 'doc_meta_' + cat_name + '.tsv'
    count = 0
    with open(meta_doc, 'w') as w:
        w.write("cluster\tquestion\n")
        for question, cluster in zip(list(df.question), list(df.cluster)):
            w.write(str(cluster) + "\t" + str(question) + "\n")
            count += 1
    print("Wrote file {} with {} entries".format(meta_doc, count))
# In[617]:
def train_model(df, N, name):
    print("Loaded {} Data Points".format(len(df)))
    vectorizer = TfidfVectorizer(min_df=0.01, max_df=0.7)
    X_tfidf = vectorizer.fit_transform(list(df.question))
    save_obj(vectorizer, 'vectorizer_' + name)
    print("Vectorization Complete")
    # Grow the LSA dimensionality in steps of 5 until the SVD explains at
    # least 50% of the variance, capped at 175 components.
    n_components = 60
    explained_variance = 0.0
    while explained_variance < .5 and n_components < 175:
        svd = TruncatedSVD(n_components=n_components)
        normalizer = Normalizer(copy=False)
        lsa = make_pipeline(svd, normalizer)
        X = lsa.fit_transform(X_tfidf)
        save_obj(svd, 'svd_' + name)
        save_obj(normalizer, 'normalizer_' + name)
        df["features"] = list(X)
        explained_variance = svd.explained_variance_ratio_.sum()
        n_components += 5
    print("Explained variance of the SVD step: {}% n_components: {}".format(
        int(explained_variance * 100), svd.n_components))
    df = cluster(df, X, N, name, v=False)
    print_to_tsv(df, X, name)
# In[618]:
def train_all():
    df_master = pd.DataFrame(pull_data())
    cat_names = ["Compensation", "Compliance", "Employee Benefits",
                 "Leaves of Absence", "Recruiting and Hiring", "Terminations"]
    Ns = [20, 15, 14, 9, 9, 7]
    for name, N in zip(cat_names, Ns):
        df = df_master[df_master["category"] == name].copy()
        train_model(df, N, name)
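

# Retraining every category's model (this hits the database and overwrites the
# pickled vectorizer/svd/normalizer/clusterer files for each category):
#     train_all()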
# In[619]:
def predict(messages, category):
    # Reload the fitted transformers and clusterer saved by train_model() and
    # run new messages through the same TF-IDF -> SVD -> normalize chain.
    vectorizer = load_obj('vectorizer_' + category)
    svd = load_obj('svd_' + category)
    normalizer = load_obj('normalizer_' + category)
    clusterer = load_obj('clusterer_' + category)
    pipeline = make_pipeline(vectorizer, svd, normalizer)
    messages = pipeline.transform(messages)
    clusters = clusterer.predict(messages)
    return clusters
# In[620]:
message = ("With the minimum wage in MA going up to $11 per hour, for our sales people," +
"they make a base of $400 per week plus bonus." +
"Would we have to increase their base rate of pay to minimum wage? What if they work a week with no bonus?")
category = "Compensation"
print(predict([message], category))
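
# predict() returns a numpy array with one cluster index per input message,
# e.g. something like array([7]); the index is only meaningful relative to
# the KMeans model trained for that category.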