root@30656f8d7a9e:/etc/zm# cat objectconfig.ini
# Configuration file for object detection

# NOTE: ALL parameters here can be overridden
# on a per-monitor basis if you want. Just
# duplicate them inside the correct [monitor-<num>] section

# You can create your own custom attributes in the [custom] section
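
# For instance (hypothetical monitor ID), overriding a couple of values
# for monitor 5 might look like this:
#
# [monitor-5]
# object_detection_pattern=(person)
# object_min_confidence=0.5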

[general]

# Please don't change this. It is used by the config upgrade script
version=1.2

# You can now limit the number of detection processes
# per target processor. If not specified, the default is 1.
# Other detection processes will wait to acquire the lock

cpu_max_processes=3
tpu_max_processes=1
gpu_max_processes=1

# Time to wait in seconds for a processor to be free, before
# erroring out. Default is 120 (2 mins)
cpu_max_lock_wait=100
tpu_max_lock_wait=100
gpu_max_lock_wait=100


#pyzm_overrides={'conf_path':'/etc/zm','log_level_debug':0}
pyzm_overrides={'log_level_debug':5}

# This is an optional file
# If specified, you can define tokens with secret values in that file
# and only refer to the tokens in your main config file
secrets = /etc/zm/secrets.ini
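#
# As a sketch, that secrets file just maps token names to values; the
# tokens referenced below would live there (placeholder values, assuming
# the usual [secrets] section layout):
#
# [secrets]
# ZM_PORTAL=https://yourserver/zm
# ZM_USER=admin
# ZM_PASSWORD=changeme
# ZM_API_PORTAL=https://yourserver/zm/api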

# portal/user/password are needed if you plan on using ZM's legacy
# auth mechanism to get images
portal=!ZM_PORTAL
user=!ZM_USER
password=!ZM_PASSWORD

# api portal is needed if you plan to use tokens to get images
# requires ZM 1.33 or above
api_portal=!ZM_API_PORTAL

allow_self_signed=yes
# if yes, the last detection will be stored per monitor, and new
# detections whose bounding boxes and labels match the stored ones
# will be discarded. This may be helpful in getting rid of static
# objects that get detected due to some motion.
match_past_detections=no
# The max difference in area between objects if match_past_detections is on.
# Can also be specified in px, like 300px. Default is 5%. Basically, bounding
# boxes of the same object can differ ever so slightly between detections.
# Contributor @neillbell put in this PR to calculate the difference in areas
# and based on his tests, 5% worked well. YMMV. Change it if needed.
# Note: You can specify label/object specific max_diff_areas as well.
# If present, they override this value. Example:
# person_past_det_max_diff_area=5%
# car_past_det_max_diff_area=5000px
past_det_max_diff_area=5%
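# As an illustration (made-up numbers): a stored 100x100 px box has an
# area of 10,000 px^2, so with 5% a new box of the same label is treated
# as the same object only if the two areas differ by at most 500 px^2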

# this is the maximum size a detected object can have. You can specify it in px or % just like past_det_max_diff_area
# This is pretty useful to eliminate bogus detections. In my case, depending on shadows and other lighting conditions,
# I sometimes see a "car" or "person" detected that covers most of my driveway view. That is practically impossible,
# and therefore I set mine to 70% because I know any valid detected object cannot be larger than that area

max_detection_size=90%

# sequence of models to run for detection
detection_sequence=object,face,alpr
# if all, then we will loop through all models
# if first, then the first success will break out
detection_mode=all

# If you need basic auth to access ZM
#basic_user=user
#basic_password=password

# base data path for various files the ES+OD needs
# config variable substitution is supported as well
base_data_path=/var/lib/zmeventnotification
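# for example, model paths further down reference it like this:
# yolo4_object_weights={{base_data_path}}/models/yolov4/yolov4.weights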

# global setting for which frame to analyze:
# bestmatch, alarm, snapshot OR a specific frame ID
frame_id=bestmatch

# this resizes the image before analysis is done
resize=800
# set to yes if you want to remove images after analysis
# setting to yes is recommended to avoid filling up space
# keep it at no while debugging/inspecting masks
# Note this does NOT delete debug images later
delete_after_analyze=yes

# If yes, will write an image called <filename>-bbox.jpg as well,
# which contains the bounding boxes. This has NO relation to
# write_image_to_zm
# Typically, if you enable delete_after_analyze you may
# also want to set write_debug_image to no.
write_debug_image=no

# if yes, will write an image with bounding boxes
# this needs to be yes to be able to write a bounding box
# image to ZoneMinder that is visible from its console
write_image_to_zm=yes


# Adds percentages to detections
# hog/face always shows 100%
show_percent=yes

# color to be used to draw the polygons you specified
poly_color=(255,255,255)
poly_thickness=2
#import_zm_zones=yes
only_triggered_zm_zones=no
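# (per the ES docs: import_zm_zones=yes imports the zones you defined in
# ZoneMinder as detection polygons; only_triggered_zm_zones=yes further
# limits detection to the zones that actually triggered the event)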

# This section gives you an option to get brief animations
# of the event, delivered as part of the push notification to mobile devices
# Animations are created only if an object is detected
#
# NOTE: This will DELAY the time taken to send you push notifications
# It will try to first create the animation, which may take up to a minute
# depending on how soon it gets access to frames. See notes below

[animation]

# If yes, object detection will attempt to create
# a short GIF file around the object detection frame
# that can be sent via push notifications for instant playback
# Note this requires additional software support. Default: no
create_animation=no

# Format of animation burst
# valid options are "mp4", "gif", "mp4,gif"
# Note that gifs will be of a shorter duration
# as they take up much more disk space than mp4
animation_types='mp4,gif'

# default width of the animation image. Be cautious when you increase this:
# most mobile platforms give a very brief amount of time (in seconds)
# to download the image.
# Given that your ZM instance will be serving the image, it will be slow anyway.
# Making the total animation size bigger resulted in the notification not
# getting an image at all (timed out)
animation_width=640

# When an event is detected, ZM writes frames a little late.
# On top of that, it looks like with caching enabled, the API layer doesn't
# get access to DB records for a while longer (around 30 seconds), at least on my
# system. animation_retry_sleep refers to how long to wait before trying to grab
# frame information if it failed. animation_max_tries defines how many times it
# will try and retrieve frames before it gives up
animation_retry_sleep=15
animation_max_tries=4

# if animation_types includes gif, then we can generate a fast preview gif:
# every second frame is skipped and the frame rate doubled
# to give a quick preview. Default: no
fast_gif=no

[remote]
# You can now run the machine learning code on a different server
# This frees up your ZM server for other things
# To do this, you need to set up https://github.com/pliablepixels/mlapi
# on your desired server and configure it with a user. See its instructions.
# Once set up, you can choose to do object/face recognition via that
# external server

# URL that will be used
#ml_gateway=http://192.168.1.183:5000/api/v1
#ml_gateway=http://10.6.1.13:5000/api/v1
#ml_gateway=http://192.168.1.21:5000/api/v1
#ml_gateway=http://10.9.0.2:5000/api/v1
#ml_fallback_local=yes
# user/password for the remote gateway
ml_user=!ML_USER
ml_password=!ML_PASSWORD


# config for object
[object]

# If you are using the legacy format (use_sequence=no) then these parameters
# will be used during ML inferencing
object_detection_pattern=(person|car|motorbike|bus|truck|boat)
object_min_confidence=0.3
object_framework=coral_edgetpu
object_processor=tpu
object_weights={{base_data_path}}/models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
object_labels={{base_data_path}}/models/coral_edgetpu/coco_indexed.names

# If you are using the new ml_sequence format (use_sequence=yes) then
# you can fiddle with these parameters and look at ml_sequence later
# Note that these can be named anything. You can add custom variables, ad infinitum

# Google Coral
# The mobiledet model came out in Nov 2020 and is supposed to be faster and more accurate, but YMMV
tpu_object_weights_mobiledet={{base_data_path}}/models/coral_edgetpu/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
tpu_object_weights_mobilenet={{base_data_path}}/models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
tpu_object_labels={{base_data_path}}/models/coral_edgetpu/coco_indexed.names
tpu_object_framework=coral_edgetpu
tpu_object_processor=tpu
tpu_min_confidence=0.6

# Yolo v4 on GPU (falls back to CPU if no GPU)
yolo4_object_weights={{base_data_path}}/models/yolov4/yolov4.weights
yolo4_object_labels={{base_data_path}}/models/yolov4/coco.names
yolo4_object_config={{base_data_path}}/models/yolov4/yolov4.cfg
yolo4_object_framework=opencv
yolo4_object_processor=gpu

# Yolo v3 on GPU (falls back to CPU if no GPU)
yolo3_object_weights={{base_data_path}}/models/yolov3/yolov3.weights
yolo3_object_labels={{base_data_path}}/models/yolov3/coco.names
yolo3_object_config={{base_data_path}}/models/yolov3/yolov3.cfg
yolo3_object_framework=opencv
yolo3_object_processor=gpu

# Tiny Yolo V4 on GPU (falls back to CPU if no GPU)
tinyyolo_object_config={{base_data_path}}/models/tinyyolov4/yolov4-tiny.cfg
tinyyolo_object_weights={{base_data_path}}/models/tinyyolov4/yolov4-tiny.weights
tinyyolo_object_labels={{base_data_path}}/models/tinyyolov4/coco.names
tinyyolo_object_framework=opencv
tinyyolo_object_processor=gpu


[face]
face_detection_pattern=.*
known_images_path={{base_data_path}}/known_faces
unknown_images_path={{base_data_path}}/unknown_faces
save_unknown_faces=yes
save_unknown_faces_leeway_pixels=100
face_detection_framework=dlib

# read https://github.com/ageitgey/face_recognition/wiki/Face-Recognition-Accuracy-Problems
# read https://github.com/ageitgey/face_recognition#automatically-find-all-the-faces-in-an-image
# and play around

# quick overview:
# num_jitters is how many times to distort images
# upsample_times is how many times to upsample input images (for small faces, for example)
# model can be hog or cnn. cnn may be more accurate, but I haven't found it to be

face_num_jitters=1
face_model=cnn
face_upsample_times=1

# This is the maximum distance of the face under test to the closest matched
# face cluster. The larger this distance, the larger the chances of misclassification.
#
face_recog_dist_threshold=0.6
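# (for reference, 0.6 is the default tolerance used by the underlying
# face_recognition library; lower values make matching stricter)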
# When we are first training the face recognition model with known faces,
# by default we use hog because we assume you will supply well lit, front facing faces.
# However, if you are planning to train with profile photos or hard to see faces, you
# may want to change this to cnn. Note that this increases training time, but training only
# happens once, unless you retrain again by removing the training model
face_train_model=cnn
# if a face doesn't match known names, we will detect it as 'unknown face'
# you can change that to something that suits your personality better ;-)
#unknown_face_name=invader

[alpr]
alpr_detection_pattern=.*
alpr_use_after_detection_only=yes
# Many of the ALPR providers offer both a cloud version
# and a local SDK version. Sometimes the local SDK format differs from
# the cloud instance. Set this to local or cloud. Default: cloud
alpr_api_type=cloud

# -----| If you are using Plate Recognizer | ------
alpr_service=plate_recognizer
#alpr_service=open_alpr_cmdline

# If you want to host a local SDK https://app.platerecognizer.com/sdk/
#alpr_url=http://192.168.1.21:8080/alpr
# Plate Recognizer: replace with your API key
alpr_key=!PLATEREC_ALPR_KEY
# if yes, then it will log usage statistics of the ALPR service
platerec_stats=yes
# If you want to specify regions. See http://docs.platerecognizer.com/#regions-supported
#platerec_regions=['us','cn','kr']
# minimum confidence for actually detecting a plate
platerec_min_dscore=0.1
# minimum confidence for the translated text
platerec_min_score=0.2


# ----| If you are using OpenALPR |-----
#alpr_service=open_alpr
#alpr_key=!OPENALPR_ALPR_KEY

# For an explanation of params, see http://doc.openalpr.com/api/?api=cloudapi
#openalpr_recognize_vehicle=1
#openalpr_country=us
#openalpr_state=ca
# openalpr returns percents, but we convert to between 0 and 1
#openalpr_min_confidence=0.3

# ----| If you are using the OpenALPR command line |-----

openalpr_cmdline_binary=alpr

# Do an alpr -help to see options, and plug them in here,
# like, say, '-j -p ca -c US' etc.
# keep the -j because it outputs JSON

# Note that alpr_detection_pattern is honored
# For the rest, just stuff them in the cmd line options

openalpr_cmdline_params=-j -d
openalpr_cmdline_min_confidence=0.3
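
# As a quick sanity check (hypothetical image path), you can run the
# binary by hand with the same flags and inspect the JSON it prints:
# alpr -j -d /tmp/test_plate.jpg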


## Monitor-specific settings


# Examples:
# Let's assume your monitor ID is 999
[monitor-999]
# my driveway
match_past_detections=no
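# wait: seconds to pause before grabbing frames, to give ZM time to
# write them (assumption based on the option name; see the ES docs)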
wait=5
object_detection_pattern=(person)

# Advanced example - here we want anything except potted plant
# exclusion in regular expressions is not
# as straightforward as you may think, so
# follow this pattern:
# object_detection_pattern=^(?!object1|object2|objectN)
# the characters in front implement what is
# called a negative lookahead

# object_detection_pattern=^(?!potted plant|pottedplant|bench|broccoli)
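# e.g. with the pattern above, labels such as 'person' or 'car' still
# match, while anything starting with 'potted plant', 'pottedplant',
# 'bench' or 'broccoli' is rejected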
#alpr_detection_pattern=^(.*x11)
#delete_after_analyze=no
#detection_pattern=.*
#import_zm_zones=yes

# polygon areas where object detection will be done.
# You can name them anything except the keywords defined in the optional
# params below. You can put as many polygons as you want per [monitor-<mid>]
# (see examples).

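# Format: space-separated x,y pixel coordinate pairs tracing the polygon,
# as in the driveway example below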
my_driveway=306,356 1003,341 1074,683 154,715

# You are now allowed to specify a detection pattern per zone
# the format is <polygonname>_zone_detection_pattern=<regexp>
# So if your polygon is called my_driveway, its associated
# detection pattern will be my_driveway_zone_detection_pattern
# If none is specified, the value in object_detection_pattern
# will be used
# This also applies to ZM zones. Let's assume you have
# import_zm_zones=yes and let's suppose you have a zone in ZM
# called Front_Door. In that case, all you need to do is put in a
# front_door_zone_detection_pattern=(person|car) here
#
# NOTE: ZM Zones are converted to lowercase, and spaces are replaced
# with underscores

my_driveway_zone_detection_pattern=(person)
some_other_area=0,0 200,300 700,900
resize=no
# use license plate recognition for my driveway
# see the alpr section above for more details
detection_sequence=object,alpr


[ml]
# When enabled, you can specify complex ML inferencing logic in ml_sequence
# Anything specified in ml_sequence will override any other ml attributes

# Also, when enabled, stream_sequence will override any other frame-related
# attributes
use_sequence=yes

# if enabled, will not grab exclusive locks before running inferencing
# locking seems to cause issues on some unique file systems
disable_locks=no

# Chain of frames
# See https://zmeventnotification.readthedocs.io/en/latest/guides/hooks.html#understanding-detection-configuration
# Also see https://pyzm.readthedocs.io/en/latest/source/pyzm.html#pyzm.ml.detect_sequence.DetectSequence.detect_stream
# Very important: Make sure the final ending brace is indented
stream_sequence = {
    'frame_strategy': 'most_models',
    'frame_set': 'snapshot,alarm',
    'contig_frames_before_error': 5,
    'max_attempts': 3,
    'sleep_between_attempts': 4,
    'resize': 800
    }

# Chain of ML models to use
# See https://zmeventnotification.readthedocs.io/en/latest/guides/hooks.html#understanding-detection-configuration
# Also see https://pyzm.readthedocs.io/en/latest/source/pyzm.html#pyzm.ml.detect_sequence.DetectSequence
# Very important: Make sure the final ending brace is indented
ml_sequence= {
    'general': {
        'model_sequence': 'object,face,alpr',
        'disable_locks': '{{disable_locks}}',
        'match_past_detections': '{{match_past_detections}}',
        'past_det_max_diff_area': '5%',
        'car_past_det_max_diff_area': '10%',
        #'ignore_past_detection_labels': ['dog', 'cat']
    },
    'object': {
        'general':{
            'pattern':'{{object_detection_pattern}}',
            'same_model_sequence_strategy': 'first' # also 'most', 'most_unique'
        },
        'sequence': [{
            # First run on TPU with higher confidence
            'name': 'TPU object detection',
            'enabled': 'no',
            'object_weights':'{{tpu_object_weights_mobiledet}}',
            'object_labels': '{{tpu_object_labels}}',
            'object_min_confidence': {{tpu_min_confidence}},
            'object_framework':'{{tpu_object_framework}}',
            'tpu_max_processes': {{tpu_max_processes}},
            'tpu_max_lock_wait': {{tpu_max_lock_wait}},
            'max_detection_size':'{{max_detection_size}}'
        },
        {
            # YoloV4 on GPU if TPU fails (because sequence strategy is 'first')
            'name': 'YoloV4 GPU/CPU',
            'enabled': 'yes', # don't really need to say this explicitly
            'object_config':'{{yolo4_object_config}}',
            'object_weights':'{{yolo4_object_weights}}',
            'object_labels': '{{yolo4_object_labels}}',
            'object_min_confidence': {{object_min_confidence}},
            'object_framework':'{{yolo4_object_framework}}',
            'object_processor': '{{yolo4_object_processor}}',
            'gpu_max_processes': {{gpu_max_processes}},
            'gpu_max_lock_wait': {{gpu_max_lock_wait}},
            'cpu_max_processes': {{cpu_max_processes}},
            'cpu_max_lock_wait': {{cpu_max_lock_wait}},
            'max_detection_size':'{{max_detection_size}}'
        }]
    },
    'face': {
        'general':{
            'pattern': '{{face_detection_pattern}}',
            'same_model_sequence_strategy': 'union' # combines all outputs of this sequence
        },
        'sequence': [
        {
            'name': 'TPU face detection',
            'enabled': 'no',
            'face_detection_framework': 'tpu',
            'face_weights':'/var/lib/zmeventnotification/models/coral_edgetpu/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite',
            'face_min_confidence': 0.3,
        },
        {
            'name': 'DLIB based face recognition',
            'enabled': 'yes',
            #'pre_existing_labels': ['face'], # If you use TPU detection first, we can run this ONLY if the TPU detects a face first
            'save_unknown_faces':'{{save_unknown_faces}}',
            'save_unknown_faces_leeway_pixels':{{save_unknown_faces_leeway_pixels}},
            'face_detection_framework': '{{face_detection_framework}}',
            'known_images_path': '{{known_images_path}}',
            'unknown_images_path': '{{unknown_images_path}}',
            'face_model': '{{face_model}}',
            'face_train_model': '{{face_train_model}}',
            'face_recog_dist_threshold': '{{face_recog_dist_threshold}}',
            'face_num_jitters': '{{face_num_jitters}}',
            'face_upsample_times':'{{face_upsample_times}}',
            'gpu_max_processes': {{gpu_max_processes}},
            'gpu_max_lock_wait': {{gpu_max_lock_wait}},
            'cpu_max_processes': {{cpu_max_processes}},
            'cpu_max_lock_wait': {{cpu_max_lock_wait}},
            'max_size':800
        }]
    },
    'alpr': {
        'general':{
            'same_model_sequence_strategy': 'first',
            'pre_existing_labels':['car', 'motorbike', 'bus', 'truck', 'boat'],
            'pattern': '{{alpr_detection_pattern}}'
        },
        'sequence': [{
            'name': 'Platerecognizer cloud',
            'enabled': 'yes',
            'alpr_api_type': '{{alpr_api_type}}',
            'alpr_service': '{{alpr_service}}',
            'alpr_key': '{{alpr_key}}',
            'platerec_stats': '{{platerec_stats}}',
            'platerec_min_dscore': {{platerec_min_dscore}},
            'platerec_min_score': {{platerec_min_score}},
            'max_size':1600,
            #'platerec_payload': {
            #'regions':['us'],
            #'camera_id':12,
            #},
            #'platerec_config': {
            #    'region':'strict',
            #    'mode': 'fast'
            #}
        }]
    }
    }