-
Notifications
You must be signed in to change notification settings - Fork 0
/
run.py
146 lines (122 loc) · 4.24 KB
/
run.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import json
import plotly
import pandas as pd
import numpy as np
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request
import joblib
from sqlalchemy import create_engine
# Flask application instance; templates are resolved from ./templates.
app = Flask(__name__)
def tokenize(text):
    """Split *text* into word tokens and return each one lemmatized,
    lowercased, and stripped of surrounding whitespace.

    Kept at module level so the unpickled model's vectorizer can find it.
    """
    lemmatizer = WordNetLemmatizer()
    return [
        lemmatizer.lemmatize(token).lower().strip()
        for token in word_tokenize(text)
    ]
# load data
# NOTE(review): paths are relative to the working directory — run from the
# repo root so data/ and models/ resolve.
engine = create_engine("sqlite:///data/disaster_response.db")
df = pd.read_sql_table("messages", engine)
# load model
model = joblib.load("models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route("/")
@app.route("/index")
def index():
    """Render the landing page with three Plotly overview charts.

    Charts: message counts by genre (pie), message-length histogram (bar),
    and message counts per category (bar).

    Returns:
        str: rendered ``master.html`` with chart ids and Plotly JSON.
    """
    # Messages per genre, consumed by the pie chart below. Named distinctly
    # from the histogram counts: the original reused one variable for both,
    # so the histogram overwrote the genre counts before the pie was built.
    genre_counts = df.groupby("genre").count()["message"]
    genres = list(genre_counts.index)

    # Every column after the four metadata columns is a 0/1 category
    # indicator, so a column sum is that category's message count.
    messages_per_category = (
        df.drop(["id", "message", "original", "genre"], axis=1)
        .sum()
        .sort_values(ascending=False)
    )
    categories = list(messages_per_category.index)

    # Message-length histogram. Computed on a local Series instead of
    # appending a "text_len" column to the shared global DataFrame on
    # every request, as the original did.
    text_len = df.message.str.len()
    hist_counts, bin_edges = np.histogram(text_len, bins=range(0, 1000, 100))
    # Bin centers for the x axis; the original summed adjacent edges
    # without dividing by 2, doubling every x value.
    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2

    # create visuals
    graphs = [
        {
            "data": [
                {
                    "type": "pie",
                    "uid": "f4de1f",
                    "hole": 0.4,
                    "name": "Genre",
                    "pull": 0,
                    # The original passed data values through "domain",
                    # which Plotly reserves for fractional plot-area
                    # coordinates; it is omitted here.
                    "textinfo": "label+value",
                    "hoverinfo": "all",
                    "labels": genres,
                    "values": genre_counts,
                }
            ],
            "layout": {"title": "Number of messages by genre"},
        },
        {
            "data": [
                {
                    "type": "bar",
                    "x": bin_centers,
                    "y": hist_counts,
                    "marker": {"color": "#7fc97f"},
                }
            ],
            "layout": {
                "title": "Message distribution by length",
                "yaxis": {"title": "Number of messages"},
                "xaxis": {"title": "Message length"},
                "barmode": "group",
            },
        },
        {
            "data": [
                {
                    "type": "bar",
                    "x": categories,
                    "y": messages_per_category,
                    "marker": dict(
                        # Color each bar by its own count. The original
                        # used 256 random values, which mismatched the
                        # number of bars and changed on every request.
                        color=messages_per_category.values,
                        # one of plotly colorscales
                        colorscale="hot",
                        showscale=False,
                    ),
                }
            ],
            "layout": {
                "title": "Number of messages by category",
                "yaxis": {"title": "Count"},
                "xaxis": {"title": "Genre"},
                "barmode": "group",
            },
        },
    ]

    # encode plotly graphs in JSON
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)

    # render web page with plotly graphs
    return render_template("master.html", ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route("/go")
def go():
    """Classify the user's query text and render the results page."""
    # The message typed into the search box; empty string when absent.
    user_query = request.args.get("query", "")

    # One row of 0/1 predictions, paired with the category names (the
    # columns after the four metadata columns).
    predicted_labels = model.predict([user_query])[0]
    results = dict(zip(df.columns[4:], predicted_labels))

    # This will render the go.html Please see that file.
    return render_template(
        "go.html", query=user_query, classification_result=results
    )
def main():
    # Serve on all interfaces at port 3001.
    # NOTE(review): debug=True enables the Werkzeug interactive debugger
    # (arbitrary code execution) — disable outside local development.
    app.run(host="0.0.0.0", port=3001, debug=True)
if __name__ == "__main__":
    main()