main.py
import csv
import string

import nltk
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
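
# The NLTK resources used below (tokenizer models, stop-word list, WordNet)
# must be available locally; if they are missing, download them once with:
#   nltk.download('punkt'); nltk.download('stopwords'); nltk.download('wordnet')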
def lemmatize_and_remove_stopwords(raw_text):
    """Lemmatize every word in raw_text, then drop English stop words."""
    lemmatizer = WordNetLemmatizer()
    # Lemmatize each token and rebuild the text as a space-separated string.
    lemmatized = ""
    for word in word_tokenize(raw_text):
        lemmatized = lemmatized + " " + lemmatizer.lemmatize(word)
    # Compare against the predefined English stop-word list so that
    # stop words are not copied into the output.
    stop_words = set(stopwords.words('english'))
    filtered = ""
    for word in word_tokenize(lemmatized):
        if word not in stop_words:
            filtered = filtered + " " + word
    # Final output: lemmatized text with stop words removed.
    return filtered
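
# For example, lemmatize_and_remove_stopwords("the dogs and the books")
# returns " dog book": "the" and "and" are stop words, and the plural nouns
# are lemmatized to their singular forms. Note that this helper is defined
# for preprocessing but is never called in the pipeline below.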
stemmer = nltk.stem.porter.PorterStemmer()
# Translation table that maps every punctuation character to None,
# so str.translate() deletes it.
remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)

def stem_tokens(tokens):
    return [stemmer.stem(item) for item in tokens]

def normalize(text):
    """Remove punctuation, lowercase, tokenize, and stem."""
    return stem_tokens(nltk.word_tokenize(text.lower().translate(remove_punctuation_map)))
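
# For instance, normalize("The Cats, running!") yields ['the', 'cat', 'run']:
# punctuation is stripped, the text is lowercased, and each token is
# Porter-stemmed.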
vectorizer = TfidfVectorizer(tokenizer=normalize, stop_words='english')

def cosine_sim(text1, text2):
    """Cosine similarity between two texts via their TF-IDF vectors."""
    tfidf = vectorizer.fit_transform([text1, text2])
    # TfidfVectorizer L2-normalizes each row, so the dot product of the
    # two rows is exactly their cosine similarity.
    return (tfidf * tfidf.T).toarray()[0, 1]
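
# A quick sanity check on hypothetical strings:
#   cosine_sim("machine learning methods", "methods for machine learning")
# returns a value close to 1.0 (identical vocabulary after normalization
# and stop-word removal), while two texts sharing no vocabulary score 0.0.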
# Load the abstracts; csv.reader yields lists of cells, and every cell is
# treated as one document, so all cells are flattened into a single list.
rows = []
with open(r'C:\NLP\abstract1.csv', 'rt') as f:
    for row in csv.reader(f):
        rows = rows + row

# Load the file texts the same way.
columns = []
with open(r'C:\NLP\files1.csv', 'rt') as f:
    for column in csv.reader(f):
        columns = columns + column
# Pairwise similarity matrix: entry [j][i] is the cosine similarity
# between file text j and abstract i.
a = [[0 for x in range(len(rows))] for x in range(len(columns))]
for i in range(len(rows)):
    for j in range(len(columns)):
        a[j][i] = cosine_sim(rows[i], columns[j])

df = pd.DataFrame(a)
df.to_csv(r'C:\NLP\data.csv', index=False)