-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathStreamCapture.py
More file actions
330 lines (257 loc) · 12.5 KB
/
StreamCapture.py
File metadata and controls
330 lines (257 loc) · 12.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
# test script to parse screenshots captured from stream to determine which ones are worth uploading for further analysis
import csv
import cv2
import json
import tensorflow as tf
from PIL import Image, ImageFile, ImageOps
import numpy as np
import pyimgur
from colorama import Fore, Style
from time import sleep
import RepoUpdate
from datetime import datetime
import matplotlib.pyplot as plt
import os
# PIL would otherwise raise on partially-written frames grabbed mid-stream
ImageFile.LOAD_TRUNCATED_IMAGES = True

SHOW_PREDICTION_IMGS = True  # when True, the script displays the model's predictions as images

# An ordered list of the different possible outputs of the model in the order that they appear in the output array
outputs = ["fish", "no_fish"]
species_classes = ["White Sucker", "Black Bullhead Catfish", "Plains Topminnow", "Brown Trout", "Creek Chub"]

# imgur client id
CLIENT_ID = os.getenv("IMGUR_CLIENT_ID")  # TODO set this on the device running this script
# str() guards against the env var being unset: os.getenv returns None then,
# and 'str' + None would raise TypeError before the script even starts
print('--------------------------------------------\n' + str(CLIENT_ID))

# disable scientific notation for clarity
np.set_printoptions(suppress=True)

# load the models
print("loading unary model...")
# the unary model is the primary model that determines if there is a fish in the image or not
# it is called unary because in effect it just counts up the number of frames with fish
unary_model = tf.keras.models.load_model("./models/unary_classifier/keras_model.h5")
print("unary model loaded!")
print("loading species classifier model...")
species_model = tf.keras.models.load_model("./models/species_classifier/keras_model.h5")
print("species classifier model loaded!")

# input buffer fed to both keras models; the first dimension is the batch
# size (1 image at a time), the rest is the 224x224 RGB image
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

# the model should only count fish when it sees a new fish:
# only increment the counter if there were no fish in the previous frame,
# so the same fish sitting in frame isn't counted multiple times
fish_prev_frame = False
# date parsed from the current clip's filename; '' means "use datetime.now()"
clip_date = ''
def run_model(up_image):
    """Analyze one frame: detect fish and, for each NEW fish, classify and publish it.

    Parameters
    ----------
    up_image : PIL.Image.Image
        The frame to analyze. It is converted to RGB and center-cropped
        to 224x224 before being fed to the Keras models.

    Side effects (only when a new fish is detected, i.e. the previous
    frame had none): uploads ./fish.png to imgur, appends a row to the
    csv log, rewrites the json count file, regenerates the graph png,
    and pushes the repo via RepoUpdate.git_push().

    Reads and updates the module-global `fish_prev_frame` so a fish that
    stays visible across consecutive frames is counted only once.
    """
    global fish_prev_frame
    sleep(0.2)
    print("looking for fish...")
    # loading the image
    image = up_image.convert('RGB')
    print("opened image...")
    # resize the image to be at least 224x224 and crop from the center.
    # Image.LANCZOS replaces Image.ANTIALIAS, which was removed in Pillow 10
    # (the two names have been aliases of the same filter since Pillow 2.7).
    size = (224, 224)
    image = ImageOps.fit(image, size, Image.LANCZOS)
    # turn the image into a numpy array
    image_array = np.asarray(image)
    # normalize to roughly [-1, 1].
    # NOTE(review): Teachable Machine's reference code divides by 127.5;
    # 127.0 is kept because the models were presumably validated with it — confirm
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
    # load the image into the shared (1, 224, 224, 3) input buffer
    data[0] = normalized_image_array
    # run the inference
    print("running unary model...")
    prediction = unary_model(data)
    print("ran unary_model: " + str(prediction))
    # making newlines
    print("\n\n")
    # index 0 corresponds to the "fish" class (see module-level `outputs`);
    # checking if the program is more than 50% sure
    if prediction[0][0] > 0.5:
        print("fish detected!")
        if SHOW_PREDICTION_IMGS:
            cv2.imshow("F I S H ! ! !", image_array)
            cv2.waitKey(1)
        # check to see if this is a new fish
        if not fish_prev_frame:
            # print a newline
            print("")
            _record_new_fish()
        fish_prev_frame = True
    else:
        # no fish: ALWAYS clear the flag so the next detection counts as a
        # new fish. (Previously this was only cleared when
        # SHOW_PREDICTION_IMGS was on, so in headless mode every fish after
        # the first one was never counted.)
        fish_prev_frame = False
        if SHOW_PREDICTION_IMGS:
            cv2.imshow("NOOO F I S H ! ! !", image_array)
            cv2.waitKey(1)


def _record_new_fish():
    """Classify the species of the frame in `data`, upload ./fish.png, log and publish."""
    # run through the species classifier
    species_prediction = species_model(data)
    print("ran species_model")
    # match each species name with the corresponding confidence
    species = {
        fish_type: float(species_prediction[0][n])
        for n, fish_type in enumerate(species_classes)
    }
    # the most likely species is the one with the highest confidence
    predicted_name = max(species, key=species.get)
    print(Fore.GREEN + "predicted species: " + predicted_name)
    print(Style.RESET_ALL)
    # upload the frame (saved by the caller as fish.png) to imgur
    PATH = "fish.png"
    im = pyimgur.Imgur(CLIENT_ID)
    uploaded_image = im.upload_image(PATH, title="Uploaded with PyImgur")
    # prefer the date parsed from the clip filename; fall back to "now"
    fish_date = clip_date if clip_date != '' else datetime.now()
    # new row: species, link to the image, timestamp, and a placeholder
    # column used to count the number of fish at some point.
    # newline='' is required by the csv module to avoid blank rows on Windows.
    changes = [[predicted_name, uploaded_image.link, fish_date, "1"]]
    with open(r"./FishNetStreamCapture/convertcsv.csv", "a", newline="") as f:
        csv.writer(f).writerows(changes)
    frequencies = _count_fish_by_day()
    # update the json file with the count data (used by the graph generated
    # on the website). "lables" is misspelled but is the key the consumer
    # reads, so it is kept as-is.
    j = {"lables": list(frequencies.keys()), "data": list(frequencies.values())}
    with open(r"./FishNetStreamCapture/convertjson.txt", "w") as jsn:
        json.dump(j, jsn)
    _update_graph(frequencies)
    # push the updated repo containing the csv and graph to github
    RepoUpdate.git_push()


def _count_fish_by_day():
    """Return {date-string: fish count} built from the csv log's timestamp column."""
    frequencies = {}
    with open("./FishNetStreamCapture/convertcsv.csv") as f:
        for row in csv.reader(f):
            try:
                day = row[2].split(" ")[0]
            except IndexError:
                continue  # skip short/malformed rows
            frequencies[day] = frequencies.get(day, 0) + 1
    return frequencies


def _update_graph(frequencies):
    """Regenerate ./FishNetStreamCapture/graph.png from the per-day counts."""
    ax = plt.subplot(111)
    plt.xticks(rotation=90)
    plt.xlabel("Date")
    plt.ylabel("Fish counted")
    plt.title("Fish Counted By Day")
    plt.grid(True)
    values = list(frequencies.values())
    fill = plt.fill_between(frequencies.keys(), values)
    plt.gcf().autofmt_xdate()
    plt.plot(frequencies.keys(), values)
    ax.set_xlim(0, len(frequencies) - 1)
    ax.set_ylim(0, max(values) + int(max(values) / 5))
    # bluish, mostly-transparent fill with a darker, thicker edge
    fill.set_facecolors([[.5, .5, .8, .3]])
    fill.set_edgecolors([[0, 0, .5, .3]])
    fill.set_linewidths([3])
    # add more ticks, but hide the tick marks themselves
    ax.set_xticks(np.arange(len(frequencies)))
    ax.xaxis.set_tick_params(size=0)
    ax.yaxis.set_tick_params(size=0)
    # change the color of the top and right spines to opaque gray
    ax.spines['right'].set_color((.8, .8, .8))
    ax.spines['top'].set_color((.8, .8, .8))
    # tweak the axis labels and title
    for label in (ax.xaxis.get_label(), ax.yaxis.get_label()):
        label.set_style('italic')
        label.set_size(10)
    ax.title.set_weight('bold')
    try:  # delete the old graph if it exists
        os.remove("./FishNetStreamCapture/graph.png")
    except OSError:
        pass
    plt.savefig("./FishNetStreamCapture/graph.png")
    ax.clear()
if __name__ == '__main__':
    analysis = 'local_video'  # set to 'live_stream' to analyze live stream
    cap = None  # pre-bind so the KeyboardInterrupt handler can't hit a NameError
    try:
        if analysis == 'local_video':
            # iterate over each frame in the video file captured locally on the camera pi
            for clip in sorted(os.listdir('./clips/Deployment')):
                video = './clips/Deployment/' + clip
                cap = cv2.VideoCapture(video)
                # just get the date from file name of the form clipN_[date].h264
                clip_date = clip.split('_')[1].split('.')[0]
                # iterate over the frames
                while cap.isOpened():
                    # read the current frame
                    ret, frame = cap.read()
                    if not ret:
                        break
                    # OpenCV decodes frames as BGR: convert to RGB so the
                    # saved png and the model input have correct colors
                    # (this matches the live_stream path below)
                    frame_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                    frame_image.convert('RGB').save('fish.png')
                    # rotate the image
                    #frame_image = frame_image.rotate(90)
                    run_model(frame_image)
                # release the video capture object
                cap.release()
        elif analysis == 'live_stream':
            # The following is an example of how the above backend could be
            # used to monitor a live stream running off of the camera Pi
            import multiprocessing
            import shutil  # needed for copyfile below; not imported at module level
            import yt_dlp

            # define a function that downloads frames from the live stream on a separate process
            def start_download(url, ydl_opts):
                print('starting download')
                yt_dlp.YoutubeDL(ydl_opts).download([url])
                print('ending download')

            # the youtube stream to capture images from
            url = 'https://www.youtube.com/watch?v=lXzSU7ezjp8'
            # define the options for downloading the video
            ydl_opts = {
                'format': 'best[ext=mp4]',
                'quiet': True,
                'no_warnings': True,
                'outtmpl': './clips/live.mp4'
            }
            # remove previous downloads
            for stale in ('./clips/live.mp4.part', './clips/live.mp4', './clips/saved.mp4'):
                if os.path.exists(stale):
                    os.remove(stale)
            # start the download process
            download_proc = multiprocessing.Process(target=start_download, args=(url, ydl_opts,))
            download_proc.start()
            # wait for the download to begin, then buffer 2 seconds of video
            # to run the model on
            while not os.path.exists('./clips/live.mp4.part'):
                pass
            # the module only does `from time import sleep`, so call sleep()
            # directly (time.sleep would NameError)
            sleep(2)
            shutil.copyfile('./clips/live.mp4.part', './clips/saved.mp4')
            # headless mode: don't open preview windows. Assigning the
            # module-level global directly — there is no `StreamCapture`
            # name in scope when this file runs as __main__.
            SHOW_PREDICTION_IMGS = False
            # try running FishNET!
            # open the downloaded video using OpenCV
            last_frame = 1
            cap = cv2.VideoCapture('./clips/saved.mp4')
            cap.set(cv2.CAP_PROP_POS_FRAMES, last_frame)
            while True:
                # read from the downloaded portion of the live stream
                ret, frame = cap.read()
                # if there are no more frames, copy the contents of live.mp4
                # to saved.mp4 and reopen, resuming at the last frame
                if not ret:
                    shutil.copyfile('./clips/live.mp4.part', './clips/saved.mp4')
                    last_frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
                    cap = cv2.VideoCapture('./clips/saved.mp4')
                    cap.set(cv2.CAP_PROP_POS_FRAMES, last_frame)
                    continue
                frame_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                frame_image.convert('RGB').save('fish.png')
                run_model(frame_image)
    except KeyboardInterrupt:
        if cap is not None:
            cap.release()
        print('exiting...')