This repository was archived by the owner on Apr 6, 2021. It is now read-only.

Commit a5d578f

added flask + gunicorn + haproxy

1 parent feb3e46 · commit a5d578f

File tree: 7 files changed, +176 −0 lines changed

README.md (+1)

```diff
@@ -44,6 +44,7 @@
 2. Multiple Flask with Nginx Loadbalancer
 3. Multiple Flask SocketIO with Nginx Loadbalancer
 4. RabbitMQ and multiple Celery with Flask
+5. Flask + Gunicorn + HAproxy

 ### [Big data piping](piping)
```
Dockerfile (new file, +19)

```dockerfile
FROM python:3.6.1 AS base

RUN pip3 install -U textblob ekphrasis
RUN pip3 install flask gunicorn
RUN python3 -m nltk.downloader punkt
RUN python3 -m nltk.downloader wordnet
RUN python3 -m nltk.downloader stopwords
RUN python3 -m textblob.download_corpora

WORKDIR /app

COPY . /app

RUN echo

ENV LC_ALL C.UTF-8
ENV LANG C.UTF-8

EXPOSE 5000
```
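
As a quick sanity check, the image can also be built and run on its own, outside the Compose setup further down; the `sentiment-api` tag below is an arbitrary name used only for illustration:

```bash
# build the image from this Dockerfile and run the container's start script
# with 2 Gunicorn workers (the image tag is an assumption, not from the repo)
docker build -t sentiment-api .
docker run --rm -p 5000:5000 sentiment-api bash start.sh 2
```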
How-to README (new file, +22)

## How-to

1. Run `docker-compose`,

   ```bash
   docker-compose up --build
   ```

2. Scale `sentiment` to as many instances as you want,

   ```bash
   docker-compose scale sentiment=2
   ```

   You can see the output from HAProxy,

   ```text
   sentiment-haproxy | server 5flask-gunicorn-haproxy_sentiment_1 5flask-gunicorn-haproxy_sentiment_1:5000 check inter 2000 rise 2 fall 3
   sentiment-haproxy | server 5flask-gunicorn-haproxy_sentiment_2 5flask-gunicorn-haproxy_sentiment_2:5000 check inter 2000 rise 2 fall 3
   ```

3. Visit [localhost:5000/classify](http://localhost:5000/classify?text=%20i%20hate%20u%20man),

   ```text
   {"polarity":-0.8,"subjectivity":0.9}
   ```
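
The same query from step 3 can also be issued from the command line; the expected response is the JSON shown above:

```bash
curl 'http://localhost:5000/classify?text=i%20hate%20u%20man'
# {"polarity":-0.8,"subjectivity":0.9}
```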
The Flask application (new file, +72):

```python
from flask import Flask, request, jsonify
from textblob import TextBlob
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
from ekphrasis.classes.spellcorrect import SpellCorrector

text_processor = TextPreProcessor(
    normalize = [
        'url',
        'email',
        'percent',
        'money',
        'phone',
        'user',
        'time',
        'url',
        'date',
        'number',
    ],
    annotate = {
        'hashtag',
        'allcaps',
        'elongated',
        'repeated',
        'emphasis',
        'censored',
    },
    fix_html = True,
    segmenter = 'twitter',
    corrector = 'twitter',
    unpack_hashtags = True,
    unpack_contractions = True,
    spell_correct_elong = False,
    tokenizer = SocialTokenizer(lowercase = True).tokenize,
    dicts = [emoticons],
)

sp = SpellCorrector(corpus = 'english')
app = Flask(__name__)


def process_text(string):
    # drop ekphrasis annotation tags (tokens containing '<' or '>') and
    # punctuation/digit tokens, then spell-correct the remaining tokens
    return ' '.join(
        [
            sp.correct(c)
            for c in text_processor.pre_process_doc(string)
            if '<' not in c
            and '>' not in c
            and c not in ',!;:{}\'"!@#$%^&*(01234567890?/|\\'
        ]
    )


@app.route('/', methods = ['GET'])
def hello():
    return 'Hello!'


@app.route('/classify', methods = ['GET'])
def classify():
    text = request.args.get('text')
    result = TextBlob(process_text(text))
    return jsonify(
        {
            'polarity': result.sentiment.polarity,
            'subjectivity': result.sentiment.subjectivity,
        }
    )


application = app
```
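
A minimal sketch of serving this module outside Docker, assuming the dependencies from the Dockerfile are installed locally. When Gunicorn is given only a module name it falls back to the callable named `application`, which is why the module ends with `application = app`:

```bash
# serve the module above with 2 sync workers (same bind address start.sh uses)
gunicorn -w 2 -b 0.0.0.0:5000 -k sync app

# in another shell:
curl http://localhost:5000/                                  # -> Hello!
curl 'http://localhost:5000/classify?text=i%20love%20this'   # -> polarity/subjectivity JSON
```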
Compose file (new file, +21)

```yaml
version: '3'

services:
  sentiment:
    restart: always
    build:
      context: .
    environment:
      SERVICE_PORTS: 5000
    command: bash start.sh 2
  lstm-haproxy:
    image: dockercloud/haproxy
    depends_on:
      - sentiment
    links:
      - sentiment
    ports:
      - '5000:80'
    container_name: sentiment-haproxy
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
```
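
With this Compose file up, scaling works as in the how-to above; one way to watch `dockercloud/haproxy` pick up the new backends is to follow the proxy container's logs (service and container names are taken from the file above):

```bash
docker-compose up --build -d
docker-compose scale sentiment=3     # newer Compose releases: docker-compose up -d --scale sentiment=3
docker logs -f sentiment-haproxy     # prints the regenerated backend/server lines
```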
New file (+37), presumably the `initial.py` invoked by the start script below; it builds the same ekphrasis pipeline as the Flask module so the required word statistics and spell-correction corpus are fetched before Gunicorn starts serving requests:

```python
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
from ekphrasis.classes.spellcorrect import SpellCorrector

text_processor = TextPreProcessor(
    normalize = [
        'url',
        'email',
        'percent',
        'money',
        'phone',
        'user',
        'time',
        'url',
        'date',
        'number',
    ],
    annotate = {
        'hashtag',
        'allcaps',
        'elongated',
        'repeated',
        'emphasis',
        'censored',
    },
    fix_html = True,
    segmenter = 'twitter',
    corrector = 'twitter',
    unpack_hashtags = True,
    unpack_contractions = True,
    spell_correct_elong = False,
    tokenizer = SocialTokenizer(lowercase = True).tokenize,
    dicts = [emoticons],
)

sp = SpellCorrector(corpus = 'english')
```
The start script (new file, +4), invoked from the Compose file as `bash start.sh 2`:

```bash
NUM_WORKER=$1
BIND_ADDR=0.0.0.0:5000
python3 initial.py
gunicorn --graceful-timeout 30 --reload --max-requests 10 --timeout 180 -w $NUM_WORKER -b $BIND_ADDR -k sync app
```
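
Given the `command: bash start.sh 2` entry in the Compose file, each `sentiment` container first runs the warm-up script and then launches Gunicorn, i.e. roughly the equivalent of:

```bash
# what start.sh expands to when called with NUM_WORKER=2
python3 initial.py
gunicorn --graceful-timeout 30 --reload --max-requests 10 --timeout 180 \
    -w 2 -b 0.0.0.0:5000 -k sync app
```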
