-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathData_Explorer.py
More file actions
151 lines (114 loc) · 4.58 KB
/
Data_Explorer.py
File metadata and controls
151 lines (114 loc) · 4.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
import spacy as sp
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import string
from io import BytesIO
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
from textblob import TextBlob
class DataExplorer:
    """Load a JSON corpus of text paragraphs and render exploratory
    visualisations (length histograms, common words, n-grams, named
    entities) into a Streamlit app.

    The JSON file is expected to contain a 'paragraphs' column; each
    paragraph is cleaned (punctuation and spaCy stop words removed)
    once at load time.
    """

    def __init__(self, filestr: str) -> None:
        """Load the spaCy model and the JSON file specified by *filestr*."""
        self.nlp = sp.load("en_core_web_lg")
        self.df = None
        self.word_list = self.__load_json(filestr)

    ## PRIVATE METHODS

    def __load_json(self, filestr: str):
        """Read *filestr* into a DataFrame and return its cleaned
        'paragraphs' column (punctuation and stop words removed in place)."""
        self.df = pd.read_json(filestr)
        self.__del_punct(self.df['paragraphs'])
        self.__del_stop(self.df['paragraphs'])
        return self.df['paragraphs']

    def __del_punct(self, para_list) -> None:
        """Strip all punctuation characters from each paragraph, in place.

        *para_list* is a pandas Series of strings.
        """
        trans_table = str.maketrans('', '', string.punctuation)
        # enumerate yields positions, so write back with .iloc — plain
        # [index] would be label-based and wrong for a non-default index.
        for pos, para in enumerate(para_list):
            para_list.iloc[pos] = para.translate(trans_table)

    def __del_stop(self, para_list) -> None:
        """Remove spaCy stop words from each paragraph, in place.

        *para_list* is a pandas Series of strings.
        """
        for pos, para in enumerate(para_list):
            doc = self.nlp(para)
            kept = [token.text for token in doc if not token.is_stop]
            para_list.iloc[pos] = " ".join(kept)

    def __create_corpus(self, text) -> list:
        """Flatten a Series of space-separated strings into one flat
        list of words."""
        tokenized = text.str.split().values.tolist()
        return [word for words in tokenized for word in words]

    def __get_top_ngram(self, n: int, cnt: int) -> list:
        """Return the *cnt* most frequent n-grams as (ngram, count)
        pairs, most frequent first.

        The n-gram size (bigram, trigram, ...) is fixed by *n*.
        """
        vec = CountVectorizer(ngram_range=(n, n)).fit(self.get_word_list())
        bag_of_words = vec.transform(self.get_word_list())
        sum_words = bag_of_words.sum(axis=0)
        words_freq = [(word, sum_words[0, idx])
                      for word, idx in vec.vocabulary_.items()]
        words_freq.sort(key=lambda pair: pair[1], reverse=True)
        return words_freq[:cnt]

    def __render(self, fig) -> None:
        """Send *fig* to Streamlit as a PNG, then close it so figures
        do not accumulate across Streamlit reruns."""
        buf = BytesIO()
        fig.savefig(buf, format='png')
        buf.seek(0)  # rewind so st.image reads from the start
        st.image(buf)
        plt.close(fig)

    # PUBLIC METHODS

    def get_word_list(self):
        """Return the cleaned paragraph Series."""
        return self.word_list

    def show_char_word(self, bincnt=10) -> None:
        """Plot side-by-side histograms of characters-per-paragraph and
        words-per-paragraph, each with *bincnt* bins."""
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
        char_counts = self.word_list.str.len()
        word_counts = self.word_list.str.split().map(len)
        # Label each axes explicitly instead of mixing in the stateful
        # plt.subplot/plt.xlabel API.
        ax1.hist(char_counts, bincnt)
        ax1.set_xlabel("Characters")
        ax1.set_ylabel("Count")
        ax2.hist(word_counts, bincnt)
        ax2.set_xlabel("Words")
        ax2.set_ylabel("Count")
        plt.tight_layout()
        self.__render(fig)

    def show_common_words(self, cnt=40) -> None:
        """Bar-plot the *cnt* most frequent words across the corpus."""
        corpus = self.__create_corpus(self.get_word_list())
        most = Counter(corpus).most_common(cnt)
        # Blank paragraphs can contribute an empty-string token; drop
        # falsy words so the plot stays meaningful.
        words = [word for word, _ in most if word]
        counts = [count for word, count in most if word]
        # Create exactly one figure (the original leaked an extra one
        # via a stray plt.subplots call).
        fig = plt.figure(figsize=(8, 7))
        g = sns.barplot(x=counts, y=words, width=0.6)
        g.set(xlabel="Count", ylabel="Common Words", title="Common Words")
        plt.tight_layout()
        self.__render(fig)

    def show_ngrams(self, n=2, cnt=10) -> None:
        """Bar-plot the *cnt* most frequent n-grams of size *n*."""
        fig = plt.figure(figsize=(8, 7))
        top_n_grams = self.__get_top_ngram(n, cnt)
        grams, counts = map(list, zip(*top_n_grams))
        g = sns.barplot(x=counts, y=grams)
        g.set(xlabel="Count", title=f"N-Gram ({n})")
        plt.tight_layout()
        self.__render(fig)

    def show_entity(self, entity: str, cnt=10) -> None:
        """Bar-plot the *cnt* most frequent named entities whose spaCy
        label matches *entity* (e.g. 'PERSON', 'ORG'); case-insensitive."""
        nlp = self.nlp
        label = entity.upper()

        def _get_ner(text_span):
            doc = nlp(text_span)
            return [ent.text for ent in doc.ents if ent.label_ == label]

        fig = plt.figure(figsize=(8, 7))
        matches = [hit
                   for hits in self.get_word_list().apply(_get_ner)
                   for hit in hits]
        counter = Counter(matches)
        names, counts = map(list, zip(*counter.most_common(cnt)))
        sns.barplot(x=counts, y=names).set_title(label)
        plt.tight_layout()
        self.__render(fig)