 
 # initialize constants used for redis server
 EDITOR_QUEUE = "editor_queue"
-BATCH_SIZE = 1
 SERVER_SLEEP = 0.25
 CLIENT_SLEEP = 0.25
 
@@ -21,45 +20,47 @@ def string_to_datetime(string_dt):
     return datetime.datetime(*[int(v) for v in string_dt.replace('T', '-').replace(':', '-').split('-')])
 
 # function used to retrieve editor_data from redis and store the results back
-def classify_process():
+def classify_process(size):
 
     print("* Loading model...")
     global model
     model = load_model('static/models/weights/current_lodbrok.h5')
     print("* Model loaded")
 
+    BATCH_SIZE = size
+
     # All the editor details are retrieved here from redis
     queue = db.lrange(EDITOR_QUEUE, 0, BATCH_SIZE - 1)
-    editorIDs = []
-
-    queue = json.loads(queue[0])
-    editorIDs.append(queue["id"])
 
-    # changing string datetime to datetime objects
-    queue["birth_date"] = string_to_datetime(queue["birth_date"])
-    queue["member_since"] = string_to_datetime(queue["member_since"])
-    queue["email_confirm_date"] = string_to_datetime(queue["email_confirm_date"])
-    queue["last_updated"] = string_to_datetime(queue["last_updated"])
-    queue["last_login_date"] = string_to_datetime(queue["last_login_date"])
 
-    # preprocessing the given input to get prediction
-    queue = preprocess_editor(queue)
+    for q in queue:
 
-    # defining the structure
-    queue = np.array([queue])
+        q = json.loads(q)
+        editor_id = q["id"]
 
-    # only data from index 1 is considered while predicting, thus
-    # not taking the spam value into consideration
-    predict_data = {
-        "main_input": np.array(queue[:, 1:10]),
-        "email_input": np.array(queue[:, 10]),
-        "website_input": np.array(queue[:, 11]),
-        "bio_input": np.array(queue[:, 12:]),
-    }
-
-    # check to see if we need to process the batch
-    if len(editorIDs) > 0:
+        # changing string datetimes to datetime objects
+        q["birth_date"] = string_to_datetime(q["birth_date"])
+        q["member_since"] = string_to_datetime(q["member_since"])
+        q["email_confirm_date"] = string_to_datetime(q["email_confirm_date"])
+        q["last_updated"] = string_to_datetime(q["last_updated"])
+        q["last_login_date"] = string_to_datetime(q["last_login_date"])
+
+        # preprocessing the given input to get a prediction
+        q = preprocess_editor(q)
+
+        # defining the structure
+        q = np.array([q])
 
+        # only data from index 1 onwards is considered while predicting, thus
+        # not taking the spam value into consideration
+        predict_data = {
+            "main_input": np.array(q[:, 1:10]),
+            "email_input": np.array(q[:, 10]),
+            "website_input": np.array(q[:, 11]),
+            "bio_input": np.array(q[:, 12:]),
+        }
+
+
         result = model.predict(x=[
             predict_data["main_input"],
             predict_data["email_input"],
@@ -83,10 +84,10 @@ def classify_process():
         prediction = json.dumps(prediction)
 
         # storing the result in redis
-        db.set(str(editorIDs[0]), prediction)
-
-        # remove the set of editor from our queue
-        db.ltrim(EDITOR_QUEUE, len(editorIDs), -1)
+        db.set(str(editor_id), prediction)
+
+        # remove the set of editors from our queue
+        db.ltrim(EDITOR_QUEUE, size, -1)
 
 
 
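For context, the new `size` parameter means the caller now decides how many queued editors `lrange`/`ltrim` operate on per pass. Below is a minimal sketch, not part of this commit, of how such a worker might be driven and how a client could enqueue an editor and poll for its prediction; the Redis connection setup and the `run_server`/`enqueue_and_wait` names are assumptions for illustration, while EDITOR_QUEUE, the sleep constants, and classify_process itself come from the repository.

# Sketch only: run_server, enqueue_and_wait, and the Redis connection details
# are hypothetical; classify_process(size) is the function changed in this diff.
import json
import time

import redis

db = redis.StrictRedis(host="localhost", port=6379, db=0)

EDITOR_QUEUE = "editor_queue"
SERVER_SLEEP = 0.25
CLIENT_SLEEP = 0.25


def run_server(batch_size=32):
    # poll the queue and classify up to `batch_size` editors per pass
    while True:
        if db.llen(EDITOR_QUEUE) > 0:
            classify_process(batch_size)
        time.sleep(SERVER_SLEEP)


def enqueue_and_wait(editor):
    # client side: push one editor onto the queue, then poll for its result,
    # which the worker stores under the editor's id
    db.rpush(EDITOR_QUEUE, json.dumps(editor))
    while True:
        result = db.get(str(editor["id"]))
        if result is not None:
            return json.loads(result)
        time.sleep(CLIENT_SLEEP)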