# tests.py
# -*- coding: utf-8 -*-
# import ijson
import bz2
import json
import ahocorasick

known_words = ahocorasick.Automaton()
known_words.add_word("первый второй", (1, "первый второй"))
# add_word() keeps a single value per key, so the second call for "второй"
# below overwrites the (3, "второй") value with (2, "второй").
known_words.add_word("второй", (3, "второй"))
known_words.add_word("второй", (2, "второй"))
known_words.make_automaton()

haystack = "На первый второй, рассчитайсь!"
# iter() yields (end_index, value) for every stored key found in haystack;
# the start index is recovered from the length of the matched key.
for end_index, (insert_order, original_value) in known_words.iter(haystack):
    start_index = end_index - len(original_value) + 1
    print((start_index, end_index, (insert_order, original_value)))
    assert haystack[start_index:start_index + len(original_value)] == original_value
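
# A hedged aside, not part of the original experiment: iter() reports
# overlapping matches ("второй" ends where "первый второй" ends). One simple
# way to keep non-overlapping matches, earliest-first, is to sort by start
# position and skip any match that begins before the previous one ended.
matches = []
for end_index, (insert_order, original_value) in known_words.iter(haystack):
    start_index = end_index - len(original_value) + 1
    matches.append((start_index, end_index, original_value))
non_overlapping = []
previous_end = -1
for start_index, end_index, value in sorted(matches):
    if start_index > previous_end:
        non_overlapping.append(value)
        previous_end = end_index
print(non_overlapping)  # expect ['первый второй'] for this haystack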

some_set = set()
some_set.add("word1")
some_set.add("word2")
some_set.add("второй")

# get() is an exact dictionary-style lookup of stored keys; "nope" is a
# sentinel default so that missing words do not raise KeyError.
for word in some_set:
    result = known_words.get(word, "nope")
    if result != "nope":
        print("Yay!")
        print(result)
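
# A sketch of the same lookup with membership tests instead of a sentinel;
# exists() and the `in` operator are part of the pyahocorasick mapping API.
for word in some_set:
    if word in known_words:
        print("Yay!")
        print(known_words.get(word))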

# def find_synonims(text):
#     print("lul")
#     important = {}
#     synonyms = bz2.BZ2File("C:/Users/2/Downloads/latest-all.json.bz2", 'rb')
#     i = 0
#     for obj in ijson.items(synonyms, 'item'):
#         try:
#             # if (obj['ru']['en']['value'] == text) or\
#             #    (obj['labels']['en-gb']['value'] == text):
#             name = obj['labels']['ru']['value']
#             aliases = obj['aliases']['ru']
#             important[name] = aliases
#             i += 1
#             if i % 5000 == 0:
#                 print('Working number ', i)
#
#             # break
#         except KeyError:
#             continue
#     with open('synonyms.json', 'a') as f:
#         f.write(json.dumps(important))
# print(find_synonims('Belgium'))
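#
# A hedged Python 3 sketch of find_synonims above: the same ijson streaming
# over the bz2 Wikidata dump, but the result is written once in "w" mode
# (appending json.dumps() output on every run, as above, concatenates JSON
# documents into an invalid file). The dump path comes from the original
# comments; the function name here is hypothetical.
# def find_synonyms_sketch(dump_path="C:/Users/2/Downloads/latest-all.json.bz2"):
#     important = {}
#     with bz2.BZ2File(dump_path, 'rb') as dump:
#         for i, obj in enumerate(ijson.items(dump, 'item'), start=1):
#             try:
#                 important[obj['labels']['ru']['value']] = obj['aliases']['ru']
#             except KeyError:
#                 continue
#             if i % 5000 == 0:
#                 print('Working number ', i)
#     with open('synonyms.json', 'w', encoding='utf-8') as f:
#         json.dump(important, f, ensure_ascii=False)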

# from nltk.stem.snowball import SnowballStemmer
# from nltk.tokenize.punkt import PunktSentenceTokenizer
# from nltk.tokenize import word_tokenize
# from nltk.tokenize.moses import MosesDetokenizer
# from nltk.tokenize.moses import MosesTokenizer
# import nltk
#
# # nltk.download()
# import re
# import string
#
# print("not lul")
# stemmer = SnowballStemmer("russian")
# s = "Хрень, со: знаками, препинания!"
# s = s[:-1]
# nltk.download()
# # s = re.sub('[' + string.punctuation + ']', '', s)
# print("seriously")
# tokens = word_tokenize("Хрень, со: знаками, препинания!")
# print(tokens)
# tokenizer = MosesTokenizer()
# tokenss = tokenizer.tokenize(s)  # Python 3 str is already unicode, no decode()
# tokens = word_tokenize(s)
# print(tokens)
# print(tokenss)
# for token in tokens:
#     print(stemmer.stem(token))
#
# detokenizer = MosesDetokenizer()
# # for tokensssss in tokenss:
# #     tokensssss = tokensssss.decode('utf8')
# print(detokenizer.detokenize(tokenss))
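#
# Hedged sketch: nltk.tokenize.moses was later removed from nltk, and the
# same tokenizer now lives in the separate "sacremoses" package with a
# near-identical interface. Assumes sacremoses is installed; uncomment to run.
# from sacremoses import MosesTokenizer, MosesDetokenizer
#
# mt = MosesTokenizer(lang='ru')
# md = MosesDetokenizer(lang='ru')
# moses_tokens = mt.tokenize("Хрень, со: знаками, препинания!")
# print(moses_tokens)
# print(md.detokenize(moses_tokens))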