Path: blob/master/video-editor/video_editor.py
1085 views
import logging
import math
import os
import sys
import tempfile
import time
from pathlib import Path

import cv2
import ffmpegcv
import numpy as np
import requests
from flask import Flask, jsonify, request
from interpolator import Interpolator
from utils import draw_bounding_box_on_image

try:
    LOG_LEVEL = int(os.environ.get("LOGGING", logging.INFO))
except ValueError as e:
    raise RuntimeError(
        "The LOGGING config should be a number, "
        "See https://guides.platerecognizer.com/docs/blur/configuration#logging"
    ) from e

logging.basicConfig(
    stream=sys.stdout,
    level=LOG_LEVEL,
    datefmt="%Y-%m-%d %H:%M:%S",
    format="%(levelname)-5s [%(name)s.%(lineno)d] => %(message)s",
)

lgr = logging.getLogger("video-editor")

BASE_WORKING_DIR = "/user-data/"


def recognition_api(cv2_frame, data, sdk_url, api_key):
    """Run plate recognition on a single frame.

    Uses the local SDK when ``sdk_url`` is set, otherwise the Cloud API
    (which requires ``api_key``). Retries indefinitely on HTTP 429 with a
    1-second backoff; raises on any other error response or on an "error"
    key in the JSON payload.
    """
    retval, buffer = cv2.imencode(".jpg", cv2_frame)

    if sdk_url:
        url = sdk_url + "/v1/plate-reader/"
        headers = None
    else:
        if api_key is None:
            raise Exception("A TOKEN is required if using Cloud API")

        url = "https://api.platerecognizer.com/v1/plate-reader/"
        headers = {"Authorization": "Token " + api_key}

    while True:
        response = requests.post(
            url, files=dict(upload=buffer), headers=headers, data=data
        )

        # FIX: was `> 300`, which silently treated HTTP 300 as success.
        if response.status_code < 200 or response.status_code >= 300:
            if response.status_code == 429:
                # Rate limited by the API: back off briefly, then retry.
                time.sleep(1)
            else:
                lgr.error(response.text)
                raise Exception("Error running recognition")
        else:
            res_json = response.json()
            if "error" in res_json:
                lgr.error(response.text)
                raise Exception("Error running recognition")

            return res_json


def visualize_frame(cv2_frame, sdk_url, snapshot_api_token):
    """Draw plate (and, when present, vehicle) bounding boxes on the frame.

    Mutates ``cv2_frame`` in place and also returns it for convenience.
    """
    run_recognition_response = recognition_api(
        cv2_frame, {}, sdk_url, snapshot_api_token
    )

    for result in run_recognition_response["results"]:
        plate_bounding_box = result["box"]
        plate = result["plate"]
        draw_bounding_box_on_image(
            cv2_frame,
            plate_bounding_box["ymin"],
            plate_bounding_box["xmin"],
            plate_bounding_box["ymax"],
            plate_bounding_box["xmax"],
            plate,
        )

        # Vehicle box (score 0 means no vehicle was detected).
        if result["vehicle"]["score"] > 0:
            vehicle_bounding_box = result["vehicle"]["box"]
            vehicle = result["vehicle"]["type"]
            draw_bounding_box_on_image(
                cv2_frame,
                vehicle_bounding_box["ymin"],
                vehicle_bounding_box["xmin"],
                vehicle_bounding_box["ymax"],
                vehicle_bounding_box["xmax"],
                vehicle,
            )

    return cv2_frame


def blur_api(cv2_frame, blur_url):
    """POST the frame to the Blur API and return the raw HTTP response."""
    retval, buffer = cv2.imencode(".jpg", cv2_frame)

    response = requests.post(blur_url, files=dict(upload=("frame.jpg", buffer)))
    # FIX: was `> 300`, which silently treated HTTP 300 as success.
    if response.status_code < 200 or response.status_code >= 300:
        lgr.error(response.text)
        raise Exception("Error performing blur")
    return response


def ellipse_polygon(box, scale=1.0, num_points=64):
    """
    Generate ellipse polygon points from a face bounding box:
    'box': A dict with keys 'xmin', 'ymin', 'xmax', 'ymax'

    scale: enlarge/shrink ellipse relative to the box
    num_points: number of polygon vertices
    """

    xmin, ymin = box["xmin"], box["ymin"]
    xmax, ymax = box["xmax"], box["ymax"]

    # Center of ellipse (middle of bounding box)
    cx = (xmin + xmax) / 2
    cy = (ymin + ymax) / 2

    # Radii (half width/height of box)
    rx = (xmax - xmin) / 2 * scale
    ry = (ymax - ymin) / 2 * scale

    pts = []
    for i in range(num_points):
        theta = 2 * math.pi * (i / num_points)
        x = cx + rx * math.cos(theta)
        y = cy + ry * math.sin(theta)
        pts.append([x, y])
    return pts


def get_blur_polygons(cv2_frame: np.ndarray, blur_url: str):
    """
    Call Blur API to request polygons to be blurred.

    Returns a list of float32 numpy polygons: one per detected plate, plus
    an ellipse approximation for each detected face box.
    """
    # FIX: parse the response body once instead of calling .json() twice.
    blur_data = blur_api(cv2_frame, blur_url).json()

    polygons = [
        np.array(plate["polygon"], dtype=np.float32)
        for plate in blur_data["plates"]
    ]

    for face in blur_data["faces"]:
        polygon = ellipse_polygon(face["box"])
        polygons.append(np.array(polygon, dtype=np.float32))

    return polygons


def save_frame(count, cv2_image, save_dir, image_format="jpg"):
    """Write one frame to ``save_dir`` as frame_<count>.<image_format>."""
    save_path = f"{save_dir}frame_{count}.{image_format}"
    lgr.debug(f"saving frame to: {save_path}")
    if image_format == "png":
        # default 3, 9 is highest compression
        cv2.imwrite(save_path, cv2_image, [int(cv2.IMWRITE_PNG_COMPRESSION), 3])

    elif image_format == "jpg":
        # default 95, 100 is best quality
        cv2.imwrite(save_path, cv2_image, [cv2.IMWRITE_JPEG_QUALITY, 95])

    else:
        raise Exception(f"Unrecognized Output format: {image_format}")


def init_writer(filename, fps):
    """Create a non-blocking h264 video writer for ``filename``."""
    return ffmpegcv.noblock(ffmpegcv.VideoWriter, filename, "h264", fps)


def _measure_fps(video_path):
    """Estimate the video's FPS by counting frames over the first 500 ms.

    ffmpegcv's ``cap.fps`` is not reliable, so decode with cv2 and use the
    frame timestamps (CAP_PROP_POS_MSEC) instead.
    """
    fps_cap = cv2.VideoCapture(video_path)
    frame_count = 0
    last_frame_time = 0
    while fps_cap.isOpened():
        ret, _ = fps_cap.read()
        last_frame_time = fps_cap.get(cv2.CAP_PROP_POS_MSEC)
        # Stop at half a second or when no more frames are readable.
        if not ret or last_frame_time >= 500:
            break
        frame_count += 1
    fps_cap.release()
    # FIX: was an `assert`, which is stripped under `python -O`.
    if last_frame_time <= 0:
        raise RuntimeError("Video too short or frames are not readable")
    return frame_count * 1000 / last_frame_time


def process_video(video, action):  # noqa: C901 TODO: Break down to reduce cyclomatic complexity
    """Process an uploaded video according to the requested actions.

    video: uploaded file object (Flask FileStorage: has .filename, .save()).
    action: string; the substrings "frames", "visualization" and "blur"
        each enable the corresponding output.

    Outputs are written under BASE_WORKING_DIR, named after the video stem.
    """
    filename = video.filename
    # FIX: the f-strings here and at the end never interpolated `filename`.
    lgr.debug(f"Processing video: {filename}")

    # check processing actions for camera
    lgr.debug(f"enabled_actions: {action}")

    frames_enabled = "frames" in action
    visualization_enabled = "visualization" in action
    blur_enabled = "blur" in action

    lgr.debug(f"CONFIG frames_enabled: {frames_enabled}")
    lgr.debug(f"CONFIG visualization_enabled: {visualization_enabled}")
    lgr.debug(f"CONFIG blur_enabled: {blur_enabled}")

    out1, out2, frames_output_dir, sdk_url, snapshot_api_token, blur_url = (
        None,
        None,
        None,
        None,
        None,
        None,
    )

    temp_dir = tempfile.mkdtemp()

    # Save the uploaded video file to the temporary directory
    video_path = os.path.join(temp_dir, video.filename)
    video.save(video_path)

    cap = ffmpegcv.VideoCapture(video_path)

    if not cap.isOpened():
        # FIX: was exit(1), which killed the whole server process from a
        # request handler; raise so the route can respond with a 500 instead.
        raise RuntimeError("Error opening video stream or file")

    filename_stem = Path(video_path).stem
    video_format_ext = "mp4"

    # Override FPS if provided, otherwise measure it from the video itself.
    try:
        fps = int(os.environ.get("FPS"))
    except (TypeError, ValueError):
        # FPS unset (TypeError) or not a number (ValueError): measure it.
        fps = _measure_fps(video_path)
    lgr.debug(f"FPS: {fps}")

    if visualization_enabled:
        output1_filename = (
            f"{BASE_WORKING_DIR}{filename_stem}_visualization.{video_format_ext}"
        )
        out1 = init_writer(output1_filename, fps)

    if blur_enabled:
        output2_filename = f"{BASE_WORKING_DIR}{filename_stem}_blur.{video_format_ext}"
        out2 = init_writer(output2_filename, fps)

    # Create the output dir for frames if missing
    if frames_enabled:
        frames_output_dir = f"{BASE_WORKING_DIR}{filename_stem}_frames/"
        Path(frames_output_dir).mkdir(parents=True, exist_ok=True)
        lgr.debug(f"CONFIG frames_output_dir: {frames_output_dir}")

    # Parse visualization parameters
    if visualization_enabled:
        sdk_url = os.environ.get("SDK_URL")
        snapshot_api_token = os.environ.get("TOKEN")

        lgr.debug(f"CONFIG sdk_url: {sdk_url}")
        lgr.debug(f"CONFIG snapshot_api_token: {snapshot_api_token}")

    # Parse blur parameters
    if blur_enabled:
        blur_url = os.environ.get("BLUR_URL")

    try:
        sample_rate = int(os.environ.get("SAMPLE"))
    except (TypeError, ValueError):
        sample_rate = 5
    keyframe_residue = 1 % sample_rate  # for sample_rate = 1
    interpolator = Interpolator(sample_rate, out2)
    interpolator.start()

    start = time.time()
    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        lgr.debug(f"Processing frame: {frame_count}")
        frame_count += 1

        if frames_enabled:
            save_frame(frame_count, frame, frames_output_dir)

        if visualization_enabled:
            # adding filled rectangle on each frame
            visualized_frame = visualize_frame(frame, sdk_url, snapshot_api_token)
            out1.write(visualized_frame)

        if blur_enabled:
            if frame_count % sample_rate == keyframe_residue:
                # Keyframe: ask the Blur API for fresh polygons.
                polygons = get_blur_polygons(frame, blur_url)
                interpolator.feed_keyframe(frame, frame_count, polygons)
            else:
                # Skipframe: polygons are interpolated between keyframes.
                interpolator.feed_skipframe(frame)

    # Flush the remaining skipframes
    if blur_enabled and interpolator.is_flush_needed(frame_count):
        frame, _ = interpolator.frame_buffer.queue[-1]
        polygons = get_blur_polygons(frame, blur_url)
        interpolator.flush(frame_count, polygons)
    interpolator.close()

    cap.release()
    if out1:
        out1.release()
    if out2:
        out2.release()

    lgr.debug(f"Frame count: {frame_count}")
    lgr.debug(f"Time taken: {time.time() - start}")
    lgr.debug(f"Done processing video: {filename}")
    os.remove(video_path)
    os.rmdir(temp_dir)


app = Flask(__name__)


@app.route("/process-video", methods=["POST"])
def process_video_route():
    """Accept a multipart upload ("upload") and an "action" form field.

    Returns 400 on a malformed request, 500 on a processing failure,
    200 when the video was processed successfully.
    """
    if "upload" not in request.files or "action" not in request.form:
        return jsonify({"error": "Invalid request"}), 400

    file = request.files["upload"]
    action = request.form["action"]

    if file.filename == "":
        return jsonify({"error": "No selected file"}), 400

    try:
        process_video(file, action)
    except Exception as e:
        lgr.error("Error:", exc_info=e)
        return jsonify({"error": str(e)}), 500

    return jsonify("Done."), 200


# NOTE(review): debug=True enables the Werkzeug debugger/reloader, which is
# unsafe if this port is reachable from untrusted networks — confirm this
# service only runs inside a trusted container network.
app.run(host="0.0.0.0", port=8081, debug=True)