-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpreprocessing.py
More file actions
241 lines (209 loc) · 8.85 KB
/
preprocessing.py
File metadata and controls
241 lines (209 loc) · 8.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
# This script is used for preprocessing. Different preprocessing options can be specified using command-line flags.
# packages
import argparse
import re
import string
import json
import requests
import bs4
import nltk
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
# import custom utils
import utils_preprocessing
# --- Module-level setup: runs once at import time ---
# Fetch the tokenizer/lemmatizer/stopword corpora nltk needs below.
print("Loading nltk data...")
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
# Build a slang dictionary by scraping netlingo.com, then cache it to disk.
# NOTE(review): despite the "Loading myslang.json..." message, this block
# WRITES Preprocessing_Data/myslang.json on every run and needs network
# access — consider reading the cached file when it already exists.
print("Loading myslang.json...")
resp = requests.get("http://www.netlingo.com/acronyms.php")
soup = bs4.BeautifulSoup(resp.text, "html.parser")
slangdict = {}
key = ""
value = ""
for div in soup.findAll('div', attrs={'class': 'list_box3'}):
    for li in div.findAll('li'):
        for a in li.findAll('a'):
            key = a.text  # the acronym text, e.g. "LOL"
            # everything after the acronym in the list item is its expansion
            # NOTE(review): raises IndexError if key is absent from li.text — TODO confirm page layout
            value = li.text.split(key)[1]
            slangdict[key] = value
with open('Preprocessing_Data/myslang.json', 'w') as f:
    json.dump(slangdict, f, indent=2)
# Load word lists used by the preprocessing functions into module globals.
STOP_WORDS = set(stopwords.words('english'))
with open('Preprocessing_Data/contractions.json', 'r') as file:
    CONTRACTIONS = json.load(file)  # maps contraction -> expanded form
with open('Preprocessing_Data/abbreviations.json', 'r') as file:
    ABBREVIATIONS = json.load(file)  # maps abbreviation -> expanded form
def replace_contractions(tweet: str):
    """Expand known contractions in a tweet.

    Args:
        tweet (string): tweet as a string

    Returns:
        string: tweet with each known contraction replaced by its expansion
    """
    tokens = TweetTokenizer().tokenize(tweet)
    # Look each token up in the CONTRACTIONS map; leave it unchanged if absent.
    expanded = [CONTRACTIONS.get(token, token) for token in tokens]
    return ' '.join(expanded)
def replace_abbreviations(tweet: str):
    """Replace known abbreviations in a tweet with their full forms.

    (Docstring previously said "Replace contraction" — copy-paste error fixed.)

    Args:
        tweet (string): tweet as a string

    Returns:
        string: tweet with abbreviations replaced by their expansions
    """
    tknzr = TweetTokenizer()
    words = tknzr.tokenize(tweet)
    # Look each token up in the ABBREVIATIONS map; keep it unchanged if absent.
    result = [ABBREVIATIONS.get(word, word) for word in words]
    return ' '.join(result)
def stemming(tweet: str) -> str:
    """Reduce every token of a tweet to its Porter stem.

    Stemming cuts words down to their root form
    (for example, books -> book, looked -> look).

    Args:
        tweet (string): tweet as a string

    Returns:
        string: tweet with every token stemmed
    """
    porter = PorterStemmer()
    stems = (porter.stem(token) for token in word_tokenize(tweet))
    return ' '.join(stems)
def lemmatizing(tweet: str) -> str:
    """Lemmatize every token of a tweet.

    Like stemming, lemmatization reduces inflected forms to a common base,
    but it uses lexical knowledge bases to find the correct base form
    instead of chopping off suffixes (e.g. running -> run).

    Args:
        tweet (string): tweet as a string

    Returns:
        string: tweet with every token lemmatized
    """
    wnl = WordNetLemmatizer()
    lemmas = (wnl.lemmatize(token) for token in word_tokenize(tweet))
    return ' '.join(lemmas)
def remove_stop_words(tweet: str) -> str:
    """Drop English stop words from a tweet.

    Stop words are very common words ("the", "a", "an", "in", ...) that carry
    little signal for sentiment analysis while inflating storage and
    processing time.

    Args:
        tweet (string): tweet as a string

    Returns:
        string: tweet with stop words removed
    """
    kept = [token for token in word_tokenize(tweet) if token not in STOP_WORDS]
    return ' '.join(kept)
def basic_preprocess(tweet: str) -> str:
    """Remove numbers, <user>/<url> tags, '#' symbols and excess whitespace, and lower-case.

    Fixes: regex patterns now use raw strings ('\\s' in a plain string is an
    invalid escape sequence and raises SyntaxWarning on modern CPython), and
    the separate leading/trailing-whitespace substitutions were redundant
    with str.strip() and have been removed.

    Args:
        tweet (string): tweet as a string

    Returns:
        string: cleaned, lower-cased tweet
    """
    tweet = re.sub(r'<user>', '', tweet)   # remove user tags
    tweet = re.sub(r'<url>', '', tweet)    # remove url tags
    tweet = re.sub(r'[0-9]', '', tweet)    # remove digits
    tweet = re.sub(r'#', '', tweet)        # drop the hashtag symbol, keep the word
    tweet = re.sub(r'\s+', ' ', tweet)     # collapse runs of whitespace
    tweet = tweet.strip()                  # strip leading/trailing whitespace
    return tweet.lower()                   # force lower case
def remove_punctuation(tweet: str) -> str:
    """Strip punctuation and special chars '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'.

    Args:
        tweet (string): tweet as a string

    Returns:
        string: tweet without punctuation
    """
    # One C-level pass that deletes every char in string.punctuation.
    return tweet.translate(str.maketrans('', '', string.punctuation))
def data_augmentation(tweet: str, n_words: int = 3) -> str:
    """Create a new tweet by thesaurus-based synonym substitution.

    Generalized: the number of words to replace was hard-coded to 3; it is
    now a defaulted parameter so callers can experiment (the original TODO
    suggested trying 2) without changing existing call sites.

    Args:
        tweet (string): tweet as a string
        n_words (int): number of words to replace with synonyms (default 3)

    Returns:
        string: new augmented tweet
    """
    return utils_preprocessing.synonym_replacement(tweet, n_words)
def main(args):
    """Run the whole preprocessing pipeline according to command-line arguments.

    Args:
        args: parsed command-line namespace (input/output paths plus the
            stemming/lemmatizing/stop_words/augmentation/verbose flags)
    """
    verbose = args.verbose

    # Read the raw data.
    if verbose:
        print("reading input from %s..." % args.input_path)
    df = utils_preprocessing.load_raw_data(args.input_path)
    print("number of tweets: %s" % len(df))

    # First cleaning pass: tags, digits, '#', whitespace, lower-casing.
    if verbose:
        print("basic processing...")
    df['tweet'] = df['tweet'].map(lambda t: basic_preprocess(str(t)))

    # Optionally grow the dataset with synonym-substituted copies.
    if args.augmentation:
        if verbose:
            print("processing: data augmentation...")
        extra = pd.DataFrame(columns=['tweet'])
        extra['tweet'] = df['tweet'].map(lambda t: data_augmentation((str(t))))
        df = pd.concat([df, extra], ignore_index=True, sort=False)

    # Expand contractions, then abbreviations.
    if verbose:
        print("processing: replace contraction...")
    df['tweet'] = df['tweet'].map(lambda t: replace_contractions(str(t)))
    if verbose:
        print("processing: replace abbreviations...")
    df['tweet'] = df['tweet'].map(lambda t: replace_abbreviations(str(t)))

    # Strip punctuation / special characters.
    if verbose:
        print("processing: remove punctuation and special characters...")
    df['tweet'] = df['tweet'].map(lambda t: remove_punctuation(str(t)))

    # Optional normalization stages.
    if args.stop_words:
        if verbose:
            print("processing: remove stopwords...")
        df['tweet'] = df['tweet'].map(lambda t: remove_stop_words(str(t)))
    if args.stemming:
        if verbose:
            print("processing: stemming...")
        df['tweet'] = df['tweet'].map(lambda t: stemming(str(t)))
    if args.lemmatizing:
        if verbose:
            print("processing: lemmatizing...")
        df['tweet'] = df['tweet'].map(lambda t: lemmatizing(str(t)))

    # Second cleaning pass to tidy whitespace the stages above may have left.
    if verbose:
        print("basic processing... Again...")
    df['tweet'] = df['tweet'].map(lambda t: basic_preprocess(str(t)))

    # Write the result as headerless, index-free CSV.
    if verbose:
        print("writing output to %s..." % args.output_path)
    df.to_csv(args.output_path, header=False, index=None, sep=',')
# Running this file from the command line performs a full preprocessing pass
# on the specified data.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='takes raw data, outputs preprocessed data')
    # Required positional paths.
    parser.add_argument('input_path', type=str, help='path to raw data', action='store')
    parser.add_argument('output_path', type=str, help='path where output should be written', action='store')
    # Optional boolean flags — all default to False, set True when present.
    flag_specs = [
        ('-s', '--stemming', 'stemming', 'do you want to stemm tweet?'),
        ('-l', '--lemmatizing', 'lemmatizing', 'do you want to lemmatize tweet?'),
        ('-sw', '--stop_words', 'stop_words', 'do you to remove stop words?'),
        ('-a', '--augmentation', 'augmentation', 'want to do data augmentation?'),
        ('-v', '--verbose', 'verbose', 'want verbose output or not?'),
    ]
    for short_opt, long_opt, dest, help_text in flag_specs:
        parser.add_argument(short_opt, long_opt, dest=dest, help=help_text, action='store_true')
    main(parser.parse_args())