import numpy as np
import re

from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets import load_files


def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", r" \( ", string)
    string = re.sub(r"\)", r" \) ", string)
    string = re.sub(r"\?", r" \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
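
# Illustrative example (not part of the original module): clean_str() pads punctuation
# with spaces, separates clitics and lower-cases the text, e.g.
#   clean_str("Don't stop, it's great!")  ->  "do n't stop , it 's great !"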


def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.
    """
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))
            shuffled_data = data[shuffle_indices]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
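
# Illustrative usage of batch_iter (assumes x_train and y_train were already built,
# e.g. via load_data_labels; batch_size and num_epochs are example values):
#   batches = batch_iter(list(zip(x_train, y_train)), batch_size=64, num_epochs=10)
#   for batch in batches:
#       x_batch, y_batch = zip(*batch)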


def get_datasets_20newsgroup(subset='train', categories=None, shuffle=True, random_state=42):
    """
    Retrieve data from the 20 newsgroups corpus
    :param subset: 'train', 'test' or 'all'
    :param categories: list of newsgroup names to load (all categories by default)
    :param shuffle: whether or not to shuffle the data
    :param random_state: integer seed used to shuffle the dataset
    :return: data and labels of the newsgroups
    """
    datasets = fetch_20newsgroups(subset=subset, categories=categories, shuffle=shuffle,
                                  random_state=random_state)
    return datasets
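
# Illustrative usage of get_datasets_20newsgroup (downloads the corpus on first call;
# the category names below are just examples):
#   datasets = get_datasets_20newsgroup(subset='train',
#                                       categories=['alt.atheism', 'sci.space'])
#   x_text, y = load_data_labels(datasets)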


def get_datasets_mrpolarity(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    with open(positive_data_file, "r") as f:
        positive_examples = [s.strip() for s in f.readlines()]
    with open(negative_data_file, "r") as f:
        negative_examples = [s.strip() for s in f.readlines()]
    datasets = dict()
    datasets['data'] = positive_examples + negative_examples
    target = [0 for x in positive_examples] + [1 for x in negative_examples]
    datasets['target'] = target
    datasets['target_names'] = ['positive_examples', 'negative_examples']
    return datasets
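
# Illustrative usage of get_datasets_mrpolarity (the file paths are placeholders for
# the MR polarity dataset):
#   datasets = get_datasets_mrpolarity("./data/rt-polaritydata/rt-polarity.pos",
#                                      "./data/rt-polaritydata/rt-polarity.neg")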


def get_datasets_localdata(container_path=None, categories=None, load_content=True,
                           encoding='utf-8', shuffle=True, random_state=42):
    """
    Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder structure.
    :param container_path: the path of the container folder
    :param categories: list of classes to load; all classes are loaded by default (if empty or omitted)
    :param load_content: whether to load the file contents into memory
    :param encoding: text encoding used to decode the files
    :param shuffle: whether or not to shuffle the data
    :param random_state: integer seed used to shuffle the dataset
    :return: data and labels of the dataset
    """
    datasets = load_files(container_path=container_path, categories=categories,
                          load_content=load_content, shuffle=shuffle, encoding=encoding,
                          random_state=random_state)
    return datasets
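
# Illustrative usage of get_datasets_localdata (the path is a placeholder; it assumes a
# ./data/my_corpus/<category_name>/<text_file> layout, one sample per file):
#   datasets = get_datasets_localdata(container_path="./data/my_corpus")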


def load_data_labels(datasets):
    """
    Load data and labels
    :param datasets: dataset dict with 'data', 'target' and 'target_names' entries
    :return: list of cleaned sentences and a one-hot label matrix
    """
    # Split by words
    x_text = datasets['data']
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels
    labels = []
    for i in range(len(x_text)):
        label = [0 for j in datasets['target_names']]
        label[datasets['target'][i]] = 1
        labels.append(label)
    y = np.array(labels)
    return [x_text, y]
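
# Illustrative usage of load_data_labels (assumes `datasets` was built by one of the
# loaders above):
#   x_text, y = load_data_labels(datasets)
#   # y is a one-hot matrix of shape (num_examples, num_classes)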


def load_embedding_vectors_word2vec(vocabulary, filename, binary):
    # Load embedding vectors from a word2vec file (binary or text format)
    encoding = 'utf-8'
    with open(filename, "rb") as f:
        header = f.readline()
        vocab_size, vector_size = map(int, header.split())
        # Initialize the matrix with random uniform values; words found in the
        # word2vec file overwrite their row below
        embedding_vectors = np.random.uniform(-0.25, 0.25, (len(vocabulary), vector_size))
        if binary:
            binary_len = np.dtype('float32').itemsize * vector_size
            for line_no in range(vocab_size):
                word = []
                while True:
                    ch = f.read(1)
                    if ch == b' ':
                        break
                    if ch == b'':
                        raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
                    if ch != b'\n':
                        word.append(ch)
                word = str(b''.join(word), encoding=encoding, errors='strict')
                idx = vocabulary.get(word)
                # Skip words that are not in the vocabulary (idx is None) and the
                # reserved index 0; still advance the file pointer past the vector
                if idx is not None and idx != 0:
                    embedding_vectors[idx] = np.frombuffer(f.read(binary_len), dtype='float32')
                else:
                    f.seek(binary_len, 1)
        else:
            for line_no in range(vocab_size):
                line = f.readline()
                if line == b'':
                    raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
                parts = str(line.rstrip(), encoding=encoding, errors='strict').split(" ")
                if len(parts) != vector_size + 1:
                    raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
                word, vector = parts[0], np.asarray(parts[1:], dtype='float32')
                idx = vocabulary.get(word)
                if idx is not None and idx != 0:
                    embedding_vectors[idx] = vector
    return embedding_vectors
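
# Illustrative usage of load_embedding_vectors_word2vec (vocabulary maps word -> index,
# e.g. the dict from a fitted VocabularyProcessor; the file path is a placeholder):
#   embeddings = load_embedding_vectors_word2vec(vocabulary,
#                                                "GoogleNews-vectors-negative300.bin",
#                                                binary=True)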


def load_embedding_vectors_glove(vocabulary, filename, vector_size):
    # Load embedding vectors from a GloVe text file
    # Initialize the matrix with random uniform values; words found in the
    # GloVe file overwrite their row below
    embedding_vectors = np.random.uniform(-0.25, 0.25, (len(vocabulary), vector_size))
    with open(filename, encoding='utf-8') as f:
        for line in f:
            values = line.split()
            word = values[0]
            vector = np.asarray(values[1:], dtype="float32")
            idx = vocabulary.get(word)
            # Skip words that are not in the vocabulary (idx is None) and the reserved index 0
            if idx is not None and idx != 0:
                embedding_vectors[idx] = vector
    return embedding_vectors
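
# Illustrative usage of load_embedding_vectors_glove (the file path and dimension are
# placeholders; vector_size must match the GloVe file):
#   embeddings = load_embedding_vectors_glove(vocabulary, "glove.6B.100d.txt",
#                                             vector_size=100)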