forked from zt8zf/Spring-2018
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtopic_modeling.py
More file actions
279 lines (212 loc) · 9.16 KB
/
topic_modeling.py
File metadata and controls
279 lines (212 loc) · 9.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 03 09:06:06 2016
@author: DIP
"""
from gensim import corpora, models
from normalization import normalize_corpus
import numpy as np
toy_corpus = ["The fox jumps over the dog",
"The fox is very clever and quick",
"The dog is slow and lazy",
"The cat is smarter than the fox and the dog",
"Python is an excellent programming language",
"Java and Ruby are other programming languages",
"Python and Java are very popular programming languages",
"Python programs are smaller than Java programs"]
# LSI topic model
norm_tokenized_corpus = normalize_corpus(toy_corpus, tokenize=True)
norm_tokenized_corpus
dictionary = corpora.Dictionary(norm_tokenized_corpus)
print dictionary.token2id
corpus = [dictionary.doc2bow(text) for text in norm_tokenized_corpus]
corpus
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
total_topics = 2
lsi = models.LsiModel(corpus_tfidf,
id2word=dictionary,
num_topics=total_topics)
for index, topic in lsi.print_topics(total_topics):
print 'Topic #'+str(index+1)
print topic
print
def print_topics_gensim(topic_model, total_topics=1,
                        weight_threshold=0.0001,
                        display_weights=False,
                        num_terms=None):
    """Print the top terms of each topic in a trained gensim topic model.

    Parameters
    ----------
    topic_model : gensim LSI/LDA model
        Must expose ``show_topic(index)`` returning (term, weight) pairs.
    total_topics : int
        Number of topics to display, starting from topic 0.
    weight_threshold : float
        Terms whose absolute weight falls below this value are dropped.
    display_weights : bool
        If True print (term, weight) pairs, otherwise terms only.
    num_terms : int or None
        Truncate the printout to this many terms (None = show all).

    Returns nothing; output goes to stdout.
    """
    for index in range(total_topics):
        topic = topic_model.show_topic(index)
        # Round for readability and drop negligible terms.
        topic = [(word, round(wt, 2))
                 for word, wt in topic
                 if abs(wt) >= weight_threshold]
        if display_weights:
            # Single-argument parenthesised print is valid under both
            # Python 2 and Python 3 (the original `print x` form is a
            # syntax error under Python 3).
            print('Topic #' + str(index + 1) + ' with weights')
            print(topic[:num_terms] if num_terms else topic)
        else:
            print('Topic #' + str(index + 1) + ' without weights')
            tw = [term for term, wt in topic]
            print(tw[:num_terms] if num_terms else tw)
        print('')
# Display the gensim LSI topics built above.
print_topics_gensim(topic_model=lsi,
                    total_topics=total_topics,
                    num_terms=5,
                    display_weights=True)
# LSI custom built topic model
from utils import build_feature_matrix, low_rank_svd
norm_corpus = normalize_corpus(toy_corpus)
# Project-local helper: returns (fitted vectorizer, tf-idf doc-term matrix).
vectorizer, tfidf_matrix = build_feature_matrix(norm_corpus,
                                                feature_type='tfidf')
# Term-document matrix: terms as rows, documents as columns.
td_matrix = tfidf_matrix.transpose()
# Keep only strictly positive entries; tf-idf values are non-negative, so
# this presumably just re-sparsifies explicit zeros -- NOTE(review): confirm.
td_matrix = td_matrix.multiply(td_matrix > 0)
total_topics = 2
feature_names = vectorizer.get_feature_names()
# Truncated SVD: u (terms x k), s (k singular values), vt (k x docs).
u, s, vt = low_rank_svd(td_matrix, singular_count=total_topics)
# Scale each topic's term loadings by its singular value; rows = topics.
weights = u.transpose() * s[:, None]
def get_topics_terms_weights(weights, feature_names):
    """Pair every topic's terms with their weights, ranked by magnitude.

    For each row of ``weights`` (one row per topic) the vocabulary terms
    are ordered by descending absolute weight.  Returns a list with one
    (num_terms x 2) array per topic whose columns are term and weight;
    numpy's mixed-type stacking stringifies both columns, which is the
    structure print_topics_udf expects.
    """
    terms = np.array(feature_names)
    topics = []
    for topic_weights in np.asarray(weights):
        # Indices of this topic's weights, largest magnitude first.
        order = np.argsort(np.abs(topic_weights))[::-1]
        ranked_terms = terms[order]
        ranked_weights = topic_weights[order]
        # Stack term and weight columns side by side.
        topics.append(np.vstack((ranked_terms, ranked_weights)).T)
    return topics
def print_topics_udf(topics, total_topics=1,
                     weight_threshold=0.0001,
                     display_weights=False,
                     num_terms=None):
    """Print topics produced by get_topics_terms_weights.

    Parameters
    ----------
    topics : list
        One sequence of (term, weight) pairs per topic; weights may
        arrive stringified (numpy mixed-type arrays) and are coerced
        back to float.
    total_topics : int
        Number of topics to display.
    weight_threshold : float
        Terms whose absolute weight falls below this value are dropped.
    display_weights : bool
        If True print (term, weight) pairs, otherwise terms only.
    num_terms : int or None
        Truncate the printout to this many terms (None = show all).

    Returns nothing; output goes to stdout.
    """
    for index in range(total_topics):
        topic = topics[index]
        # Coerce stringified weights back to float.
        topic = [(term, float(wt))
                 for term, wt in topic]
        topic = [(word, round(wt, 2))
                 for word, wt in topic
                 if abs(wt) >= weight_threshold]
        if display_weights:
            # Single-argument parenthesised print is valid under both
            # Python 2 and Python 3 (the original `print x` form is a
            # syntax error under Python 3).
            print('Topic #' + str(index + 1) + ' with weights')
            print(topic[:num_terms] if num_terms else topic)
        else:
            print('Topic #' + str(index + 1) + ' without weights')
            tw = [term for term, wt in topic]
            print(tw[:num_terms] if num_terms else tw)
        print('')
# Rank terms per topic and print them, dropping weights below 0.15.
topics = get_topics_terms_weights(weights, feature_names)
print_topics_udf(topics=topics,
                 total_topics=total_topics,
                 weight_threshold=0.15,
                 display_weights=False)
def train_lsi_model_gensim(corpus, total_topics=2):
    """Normalize a raw text corpus and fit a gensim LSI model on its
    tf-idf representation.  Returns the trained LsiModel."""
    tokenized_docs = normalize_corpus(corpus, tokenize=True)
    vocab = corpora.Dictionary(tokenized_docs)
    bow_docs = [vocab.doc2bow(doc) for doc in tokenized_docs]
    # Weight the bag-of-words vectors by tf-idf before factorization.
    tfidf_transform = models.TfidfModel(bow_docs)
    return models.LsiModel(tfidf_transform[bow_docs],
                           id2word=vocab,
                           num_topics=total_topics)
def train_lda_model_gensim(corpus, total_topics=2):
    """Normalize a raw text corpus and fit a gensim LDA model (1000
    iterations) on its tf-idf representation.  Returns the LdaModel."""
    tokenized_docs = normalize_corpus(corpus, tokenize=True)
    vocab = corpora.Dictionary(tokenized_docs)
    bow_docs = [vocab.doc2bow(doc) for doc in tokenized_docs]
    tfidf_transform = models.TfidfModel(bow_docs)
    # NOTE(review): LDA is usually trained on raw term counts rather
    # than tf-idf vectors; kept as-is to preserve original behavior.
    return models.LdaModel(tfidf_transform[bow_docs],
                           id2word=vocab,
                           iterations=1000,
                           num_topics=total_topics)
# Train and display a gensim LDA model on the toy corpus.
lda_gensim = train_lda_model_gensim(toy_corpus,
                                    total_topics=2)
print_topics_gensim(topic_model=lda_gensim,
                    total_topics=2,
                    num_terms=5,
                    display_weights=True)
from sklearn.decomposition import LatentDirichletAllocation
norm_corpus = normalize_corpus(toy_corpus)
vectorizer, tfidf_matrix = build_feature_matrix(norm_corpus,
                                                feature_type='tfidf')
total_topics = 2
# NOTE(review): `n_topics` was renamed to `n_components` in newer
# scikit-learn releases; this code targets an older version.
# NOTE(review): sklearn's LDA is normally fit on raw term counts, not
# tf-idf -- kept as written.
lda = LatentDirichletAllocation(n_topics=total_topics,
                                max_iter=1000,
                                learning_method='online',
                                learning_offset=50.,
                                random_state=42)
lda.fit(tfidf_matrix)
feature_names = vectorizer.get_feature_names()
# components_ rows hold per-topic term weights.
weights = lda.components_
topics = get_topics_terms_weights(weights, feature_names)
print_topics_udf(topics=topics,
                 total_topics=total_topics,
                 num_terms=8,
                 display_weights=True)
from sklearn.decomposition import NMF
norm_corpus = normalize_corpus(toy_corpus)
vectorizer, tfidf_matrix = build_feature_matrix(norm_corpus,
                                                feature_type='tfidf')
total_topics = 2
# Non-negative matrix factorization on the tf-idf matrix.
# NOTE(review): the `alpha` argument was removed in newer scikit-learn;
# this code targets an older version.
nmf = NMF(n_components=total_topics,
          random_state=42, alpha=.1, l1_ratio=.5)
nmf.fit(tfidf_matrix)
feature_names = vectorizer.get_feature_names()
# components_ rows hold per-topic term weights.
weights = nmf.components_
topics = get_topics_terms_weights(weights, feature_names)
print_topics_udf(topics=topics,
                 total_topics=total_topics,
                 num_terms=None,
                 display_weights=True)
import pandas as pd
import numpy as np
# Real-world corpus: Amazon reviews of Skyrim; the CSV is expected to
# have a 'Reviews' column -- TODO confirm file schema.
CORPUS = pd.read_csv('amazon_skyrim_reviews.csv')
CORPUS = np.array(CORPUS['Reviews'])
# view sample review
print CORPUS[12]
total_topics = 5
# LSI on the review corpus.
lsi_gensim = train_lsi_model_gensim(CORPUS,
                                    total_topics=total_topics)
print_topics_gensim(topic_model=lsi_gensim,
                    total_topics=total_topics,
                    num_terms=10,
                    display_weights=False)
# LDA on the review corpus.
lda_gensim = train_lda_model_gensim(CORPUS,
                                    total_topics=total_topics)
print_topics_gensim(topic_model=lda_gensim,
                    total_topics=total_topics,
                    num_terms=10,
                    display_weights=False)
norm_corpus = normalize_corpus(CORPUS)
vectorizer, tfidf_matrix = build_feature_matrix(norm_corpus,
                                                feature_type='tfidf')
feature_names = vectorizer.get_feature_names()
# sklearn LDA on the review corpus (`n_topics` is the pre-0.19 name of
# what newer releases call `n_components`).
lda = LatentDirichletAllocation(n_topics=total_topics,
                                max_iter=1000,
                                learning_method='online',
                                learning_offset=10.,
                                random_state=42)
lda.fit(tfidf_matrix)
weights = lda.components_
topics = get_topics_terms_weights(weights, feature_names)
print_topics_udf(topics=topics,
                 total_topics=total_topics,
                 num_terms=10,
                 display_weights=False)
# NMF on the review corpus (reuses tfidf_matrix from the LDA step above).
nmf = NMF(n_components=total_topics,
          random_state=42, alpha=.1, l1_ratio=.5)
nmf.fit(tfidf_matrix)
feature_names = vectorizer.get_feature_names()
weights = nmf.components_
topics = get_topics_terms_weights(weights, feature_names)
print_topics_udf(topics=topics,
                 total_topics=total_topics,
                 num_terms=10,
                 display_weights=False)