· 3 years ago · Mar 05, 2022, 09:30 AM
root@30656f8d7a9e:/etc/zm# cat objectconfig.ini
# Configuration file for object detection

# NOTE: ALL parameters here can be overridden
# on a per-monitor basis if you want. Just
# duplicate it inside the correct [monitor-<num>] section

# You can create your own custom attributes in the [custom] section
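# For instance (the monitor ID and values below are made up, purely to
# illustrate the override mechanism), this would override the global
# resize and detection pattern for monitor 12 only:
#   [monitor-12]
#   resize=1200
#   object_detection_pattern=(person|dog)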

[general]

# Please don't change this. It is used by the config upgrade script
version=1.2

# You can now limit the # of detection processes
# per target processor. If not specified, default is 1
# Other detection processes will wait to acquire the lock

cpu_max_processes=3
tpu_max_processes=1
gpu_max_processes=1

# Time to wait, in seconds, for a processor to be free before
# erroring out. Default is 120 (2 mins)
cpu_max_lock_wait=100
tpu_max_lock_wait=100
gpu_max_lock_wait=100


#pyzm_overrides={'conf_path':'/etc/zm','log_level_debug':0}
pyzm_overrides={'log_level_debug':5}

# This is an optional file
# If specified, you can specify tokens with secret values in that file
# and only refer to the tokens in your main config file
secrets = /etc/zm/secrets.ini

# portal/user/password are needed if you plan on using ZM's legacy
# auth mechanism to get images
portal=!ZM_PORTAL
user=!ZM_USER
password=!ZM_PASSWORD

# api portal is needed if you plan to use tokens to get images
# requires ZM 1.33 or above
api_portal=!ZM_API_PORTAL

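# As a sketch of what the secrets file referenced above can look like, mapping
# each !TOKEN used in this config to a real value (hostnames and credentials
# below are placeholders, not real values):
#   [secrets]
#   ZM_PORTAL=https://zm.example.com/zm
#   ZM_USER=zmuser
#   ZM_PASSWORD=zmpassword
#   ZM_API_PORTAL=https://zm.example.com/zm/api
#   ML_USER=mluser
#   ML_PASSWORD=mlpassword
#   PLATEREC_ALPR_KEY=your_platerecognizer_key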
allow_self_signed=yes
# if yes, the last detection will be stored per monitor, and new detections
# whose bounding boxes (and labels) match the stored ones
# will be discarded. This may be helpful
# in getting rid of static objects that get detected
# due to some motion.
match_past_detections=no
# The max difference in area between the objects if match_past_detections is on
# can also be specified in px like 300px. Default is 5%. Basically, bounding boxes of the same
# object can differ ever so slightly between detections. Contributor @neillbell put in this PR
# to calculate the difference in areas and based on his tests, 5% worked well. YMMV. Change it if needed.
# Note: You can specify label/object specific max_diff_areas as well. If present, they override this value
# example:
# person_past_det_max_diff_area=5%
# car_past_det_max_diff_area=5000px
past_det_max_diff_area=5%
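# As an illustration (numbers made up; the actual comparison is done by the code):
# with 5%, a past "car" box of 200x100 px (20,000 px^2) and a new box of
# 20,800 px^2 differ by about 4%, so the new detection would be treated as the
# same stationary car and discarded.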

# this is the maximum size a detected object can have. You can specify it in px or % just like past_det_max_diff_area
# This is pretty useful to eliminate bogus detections. In my case, depending on shadows and other lighting conditions,
# I sometimes see "car" or "person" detected that covers most of my driveway view. That is practically impossible
# and therefore I set mine to 70% because I know any valid detected object cannot be larger than that area

max_detection_size=90%
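# For example (illustrative, assuming the percentage is taken against the full
# frame area): on a 1280x720 monitor, 90% means any bounding box larger than
# about 829,440 px^2 (0.9 x 921,600) would be thrown out as bogus.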

# sequence of models to run for detection
detection_sequence=object,face,alpr
# if all, then we will loop through all models
# if first, then the first success will break out
detection_mode=all

# If you need basic auth to access ZM
#basic_user=user
#basic_password=password

# base data path for various files the ES+OD needs
# we support config variable substitution as well
base_data_path=/var/lib/zmeventnotification
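# For example, {{base_data_path}}/models/yolov4/yolov4.weights used further down
# expands to /var/lib/zmeventnotification/models/yolov4/yolov4.weights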

# global setting for which frame to analyze:
# bestmatch, alarm, snapshot OR a specific frame ID
frame_id=bestmatch
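# e.g. frame_id=snapshot, or a specific numeric frame such as frame_id=45
# (the 45 is purely illustrative)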

# this is the size to resize the image to before analysis is done
resize=800
# set to yes, if you want to remove images after analysis
# setting to yes is recommended to avoid filling up space
# keep to no while debugging/inspecting masks
# Note this does NOT delete debug images later
delete_after_analyze=yes

# If yes, will write an image called <filename>-bbox.jpg as well
# which contains the bounding boxes. This has NO relation to
# write_image_to_zm
# Typically, if you enable delete_after_analyze you may
# also want to set write_debug_image to no.
write_debug_image=no

# if yes, will write an image with bounding boxes
# this needs to be yes to be able to write a bounding box
# image to ZoneMinder that is visible from its console
write_image_to_zm=yes


# Adds percentage to detections
# hog/face shows 100% always
show_percent=yes

# color to be used to draw the polygons you specified
poly_color=(255,255,255)
poly_thickness=2
#import_zm_zones=yes
only_triggered_zm_zones=no

# This section gives you an option to get brief animations
# of the event, delivered as part of the push notification to mobile devices
# Animations are created only if an object is detected
#
# NOTE: This will DELAY the time taken to send you push notifications
# It will try to first create the animation, which may take up to a minute
# depending on how soon it gets access to frames. See notes below

[animation]

# If yes, object detection will attempt to create
# a short GIF file around the object detection frame
# that can be sent via push notifications for instant playback
# Note this requires additional software support. Default: no
create_animation=no

# Format of animation burst
# valid options are "mp4", "gif", "mp4,gif"
# Note that gifs will be of a shorter duration
# as they take up much more disk space than mp4
animation_types='mp4,gif'

# default width of animation image. Be cautious when you increase this
# most mobile platforms give a very brief amount of time (in seconds)
# to download the image.
# Given your ZM instance will be serving the image, it will be slow anyway
# Making the total animation size bigger resulted in the notification not
# getting an image at all (timed out)
animation_width=640

# When an event is detected, ZM writes frames a little late
# On top of that, it looks like with caching enabled, the API layer doesn't
# get access to DB records for much longer (around 30 seconds), at least on my
# system. animation_retry_sleep refers to how long to wait before trying to grab
# frame information if it failed. animation_max_tries defines how many times it
# will try and retrieve frames before it gives up
animation_retry_sleep=15
animation_max_tries=4

# if animation_types is gif then we can generate a fast preview gif
# every second frame is skipped and the frame rate doubled
# to give a quick preview. Default: no
fast_gif=no

[remote]
# You can now run the machine learning code on a different server
# This frees up your ZM server for other things
# To do this, you need to set up https://github.com/pliablepixels/mlapi
# on your desired server and configure it with a user. See its instructions
# once set up, you can choose to do object/face recognition via that
# external server

# URL that will be used
#ml_gateway=http://192.168.1.183:5000/api/v1
#ml_gateway=http://10.6.1.13:5000/api/v1
#ml_gateway=http://192.168.1.21:5000/api/v1
#ml_gateway=http://10.9.0.2:5000/api/v1
#ml_fallback_local=yes
# user/password for the remote gateway
ml_user=!ML_USER
ml_password=!ML_PASSWORD


# config for object
[object]

# If you are using the legacy format (use_sequence=no) then these parameters will
# be used during ML inferencing
object_detection_pattern=(person|car|motorbike|bus|truck|boat)
object_min_confidence=0.3
object_framework=coral_edgetpu
object_processor=tpu
object_weights={{base_data_path}}/models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
object_labels={{base_data_path}}/models/coral_edgetpu/coco_indexed.names

# If you are using the new ml_sequence format (use_sequence=yes) then
# you can fiddle with these parameters and look at ml_sequence later
# Note that these can be named anything. You can add custom variables, ad infinitum

# Google Coral
# The mobiledet model came out in Nov 2020 and is supposed to be faster and more accurate, but YMMV
tpu_object_weights_mobiledet={{base_data_path}}/models/coral_edgetpu/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
tpu_object_weights_mobilenet={{base_data_path}}/models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
tpu_object_labels={{base_data_path}}/models/coral_edgetpu/coco_indexed.names
tpu_object_framework=coral_edgetpu
tpu_object_processor=tpu
tpu_min_confidence=0.6

# Yolo v4 on GPU (falls back to CPU if no GPU)
yolo4_object_weights={{base_data_path}}/models/yolov4/yolov4.weights
yolo4_object_labels={{base_data_path}}/models/yolov4/coco.names
yolo4_object_config={{base_data_path}}/models/yolov4/yolov4.cfg
yolo4_object_framework=opencv
yolo4_object_processor=gpu

# Yolo v3 on GPU (falls back to CPU if no GPU)
yolo3_object_weights={{base_data_path}}/models/yolov3/yolov3.weights
yolo3_object_labels={{base_data_path}}/models/yolov3/coco.names
yolo3_object_config={{base_data_path}}/models/yolov3/yolov3.cfg
yolo3_object_framework=opencv
yolo3_object_processor=gpu

# Tiny Yolo V4 on GPU (falls back to CPU if no GPU)
tinyyolo_object_config={{base_data_path}}/models/tinyyolov4/yolov4-tiny.cfg
tinyyolo_object_weights={{base_data_path}}/models/tinyyolov4/yolov4-tiny.weights
tinyyolo_object_labels={{base_data_path}}/models/tinyyolov4/coco.names
tinyyolo_object_framework=opencv
tinyyolo_object_processor=gpu


[face]
face_detection_pattern=.*
known_images_path={{base_data_path}}/known_faces
unknown_images_path={{base_data_path}}/unknown_faces
save_unknown_faces=yes
save_unknown_faces_leeway_pixels=100
face_detection_framework=dlib
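# As a sketch of one common layout (names below are made-up examples):
# known_images_path typically holds one sub-directory per person, with the
# directory name used as that person's label, e.g.
#   /var/lib/zmeventnotification/known_faces/alice/front1.jpg
#   /var/lib/zmeventnotification/known_faces/bob/bob.jpg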

# read https://github.com/ageitgey/face_recognition/wiki/Face-Recognition-Accuracy-Problems
# read https://github.com/ageitgey/face_recognition#automatically-find-all-the-faces-in-an-image
# and play around

# quick overview:
# num_jitters is how many times to distort images
# upsample_times is how many times to upsample input images (for small faces, for example)
# model can be hog or cnn. cnn may be more accurate, but I haven't found it to be

face_num_jitters=1
face_model=cnn
face_upsample_times=1

# This is the maximum distance of the face under test to the closest matched
# face cluster. The larger this distance, the larger the chances of misclassification.
#
face_recog_dist_threshold=0.6
# When we are first training the face recognition model with known faces,
# by default we use hog because we assume you will supply well-lit, front-facing faces
# However, if you are planning to train with profile photos or hard-to-see faces, you
# may want to change this to cnn. Note that this increases training time, but training only
# happens once, unless you retrain by removing the training model
face_train_model=cnn
# if a face doesn't match known names, we will detect it as 'unknown face'
# you can change that to something that suits your personality better ;-)
#unknown_face_name=invader

[alpr]
alpr_detection_pattern=.*
alpr_use_after_detection_only=yes
# Many of the ALPR providers offer both a cloud version
# and a local SDK version. Sometimes the local SDK format differs from
# the cloud instance. Set this to local or cloud. Default: cloud
alpr_api_type=cloud

# -----| If you are using plate recognizer | ------
alpr_service=plate_recognizer
#alpr_service=open_alpr_cmdline

# If you want to host a local SDK https://app.platerecognizer.com/sdk/
#alpr_url=http://192.168.1.21:8080/alpr
# Plate recognizer: replace with your API key
alpr_key=!PLATEREC_ALPR_KEY
# if yes, then it will log usage statistics of the ALPR service
platerec_stats=yes
# If you want to specify regions. See http://docs.platerecognizer.com/#regions-supported
#platerec_regions=['us','cn','kr']
# minimum confidence for actually detecting a plate
platerec_min_dscore=0.1
# minimum confidence for the translated text
platerec_min_score=0.2


# ----| If you are using openALPR |-----
#alpr_service=open_alpr
#alpr_key=!OPENALPR_ALPR_KEY

# For an explanation of params, see http://doc.openalpr.com/api/?api=cloudapi
#openalpr_recognize_vehicle=1
#openalpr_country=us
#openalpr_state=ca
# openalpr returns percents, but we convert to between 0 and 1
#openalpr_min_confidence=0.3

# ----| If you are using openALPR command line |-----

openalpr_cmdline_binary=alpr

# Do an alpr -help to see options, plug them in here
# like say '-j -p ca -c US' etc.
# keep the -j because it's JSON

# Note that alpr_pattern is honored
# For the rest, just stuff them in the cmd line options

openalpr_cmdline_params=-j -d
openalpr_cmdline_min_confidence=0.3


## Monitor specific settings


# Examples:
# Let's assume your monitor ID is 999
[monitor-999]
# my driveway
match_past_detections=no
wait=5
object_detection_pattern=(person)

# Advanced example - here we want anything except potted plant
# exclusion in regular expressions is not
# as straightforward as you may think, so
# follow this pattern
# object_detection_pattern = ^(?!object1|object2|objectN)
# the characters in front implement what is
# called a negative lookahead

# object_detection_pattern=^(?!potted plant|pottedplant|bench|broccoli)
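# To illustrate the pattern above: with ^(?!potted plant|pottedplant|bench|broccoli),
# labels like "person" or "car" still match, while "potted plant" and "bench" are
# rejected, because the negative lookahead fails on anything starting with a listed word.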
#alpr_detection_pattern=^(.*x11)
#delete_after_analyze=no
#detection_pattern=.*
#import_zm_zones=yes

# polygon areas where object detection will be done.
# You can name them anything except the keywords defined in the optional
# params below. You can put as many polygons as you want per [monitor-<mid>]
# (see examples).

my_driveway=306,356 1003,341 1074,683 154,715

# You are now allowed to specify a detection pattern per zone
# the format is <polygonname>_zone_detection_pattern=<regexp>
# So if your polygon is called my_driveway, its associated
# detection pattern will be my_driveway_zone_detection_pattern
# If none is specified, the value in object_detection_pattern
# will be used
# This also applies to ZM zones. Let's assume you have
# import_zm_zones=yes and let's suppose you have a zone in ZM
# called Front_Door. In that case, all you need to do is put in a
# front_door_zone_detection_pattern=(person|car) here
#
# NOTE: ZM Zones are converted to lowercase, and spaces are replaced
# with underscores

my_driveway_zone_detection_pattern=(person)
some_other_area=0,0 200,300 700,900
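# Following the same <polygonname>_zone_detection_pattern convention, the second
# polygon above could get its own pattern too, e.g. (labels purely illustrative):
#   some_other_area_zone_detection_pattern=(car|truck)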
# use license plate recognition for my driveway
# see the alpr section for the additional settings needed
resize=no
detection_sequence=object,alpr


[ml]
# When enabled, you can specify complex ML inferencing logic in ml_sequence
# Anything specified in ml_sequence will override any other ml attributes

# Also, when enabled, stream_sequence will override any other frame-related
# attributes
use_sequence = yes

# if enabled, will not grab exclusive locks before running inferencing
# locking seems to cause issues on some unique file systems
disable_locks= no

# Chain of frames
# See https://zmeventnotification.readthedocs.io/en/latest/guides/hooks.html#understanding-detection-configuration
# Also see https://pyzm.readthedocs.io/en/latest/source/pyzm.html#pyzm.ml.detect_sequence.DetectSequence.detect_stream
# Very important: Make sure the final ending brace is indented
stream_sequence = {
        'frame_strategy': 'most_models',
        'frame_set': 'snapshot,alarm',
        'contig_frames_before_error': 5,
        'max_attempts': 3,
        'sleep_between_attempts': 4,
        'resize':800

    }

# Chain of ML models to use
# See https://zmeventnotification.readthedocs.io/en/latest/guides/hooks.html#understanding-detection-configuration
# Also see https://pyzm.readthedocs.io/en/latest/source/pyzm.html#pyzm.ml.detect_sequence.DetectSequence
# Very important: Make sure the final ending brace is indented
ml_sequence= {
        'general': {
            'model_sequence': 'object,face,alpr',
            'disable_locks': '{{disable_locks}}',
            'match_past_detections': '{{match_past_detections}}',
            'past_det_max_diff_area': '5%',
            'car_past_det_max_diff_area': '10%',
            #'ignore_past_detection_labels': ['dog', 'cat']

        },
        'object': {
            'general':{
                'pattern':'{{object_detection_pattern}}',
                'same_model_sequence_strategy': 'first' # also 'most', 'most_unique'
            },
            'sequence': [{
                # First run on TPU with higher confidence
                'name': 'TPU object detection',
                'enabled': 'no',
                'object_weights':'{{tpu_object_weights_mobiledet}}',
                'object_labels': '{{tpu_object_labels}}',
                'object_min_confidence': {{tpu_min_confidence}},
                'object_framework':'{{tpu_object_framework}}',
                'tpu_max_processes': {{tpu_max_processes}},
                'tpu_max_lock_wait': {{tpu_max_lock_wait}},
                'max_detection_size':'{{max_detection_size}}'

            },
            {
                # YoloV4 on GPU if TPU fails (because sequence strategy is 'first')
                'name': 'YoloV4 GPU/CPU',
                'enabled': 'yes', # don't really need to say this explicitly
                'object_config':'{{yolo4_object_config}}',
                'object_weights':'{{yolo4_object_weights}}',
                'object_labels': '{{yolo4_object_labels}}',
                'object_min_confidence': {{object_min_confidence}},
                'object_framework':'{{yolo4_object_framework}}',
                'object_processor': '{{yolo4_object_processor}}',
                'gpu_max_processes': {{gpu_max_processes}},
                'gpu_max_lock_wait': {{gpu_max_lock_wait}},
                'cpu_max_processes': {{cpu_max_processes}},
                'cpu_max_lock_wait': {{cpu_max_lock_wait}},
                'max_detection_size':'{{max_detection_size}}'

            }]
        },
        'face': {
            'general':{
                'pattern': '{{face_detection_pattern}}',
                'same_model_sequence_strategy': 'union' # combines all outputs of this sequence
            },
            'sequence': [
            {
                'name': 'TPU face detection',
                'enabled': 'no',
                'face_detection_framework': 'tpu',
                'face_weights':'/var/lib/zmeventnotification/models/coral_edgetpu/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite',
                'face_min_confidence': 0.3,

            },
            {
                'name': 'DLIB based face recognition',
                'enabled': 'yes',
                #'pre_existing_labels': ['face'], # If you use TPU detection first, we can run this ONLY if TPU detects a face first
                'save_unknown_faces':'{{save_unknown_faces}}',
                'save_unknown_faces_leeway_pixels':{{save_unknown_faces_leeway_pixels}},
                'face_detection_framework': '{{face_detection_framework}}',
                'known_images_path': '{{known_images_path}}',
                'unknown_images_path': '{{unknown_images_path}}',
                'face_model': '{{face_model}}',
                'face_train_model': '{{face_train_model}}',
                'face_recog_dist_threshold': '{{face_recog_dist_threshold}}',
                'face_num_jitters': '{{face_num_jitters}}',
                'face_upsample_times':'{{face_upsample_times}}',
                'gpu_max_processes': {{gpu_max_processes}},
                'gpu_max_lock_wait': {{gpu_max_lock_wait}},
                'cpu_max_processes': {{cpu_max_processes}},
                'cpu_max_lock_wait': {{cpu_max_lock_wait}},
                'max_size':800
            }]
        },

        'alpr': {
            'general':{
                'same_model_sequence_strategy': 'first',
                'pre_existing_labels':['car', 'motorbike', 'bus', 'truck', 'boat'],
                'pattern': '{{alpr_detection_pattern}}'

            },
            'sequence': [{
                'name': 'Platerecognizer cloud',
                'enabled': 'yes',
                'alpr_api_type': '{{alpr_api_type}}',
                'alpr_service': '{{alpr_service}}',
                'alpr_key': '{{alpr_key}}',
                'platrec_stats': '{{platerec_stats}}',
                'platerec_min_dscore': {{platerec_min_dscore}},
                'platerec_min_score': {{platerec_min_score}},
                'max_size':1600,
                #'platerec_payload': {
                #'regions':['us'],
                #'camera_id':12,
                #},
                #'platerec_config': {
                #   'region':'strict',
                #   'mode': 'fast'
                #}
            }]
        }
    }