#! /usr/bin/env python3
|
|
|
|
import argparse
|
|
import sys
|
|
import logging
|
|
import cv2
|
|
import subprocess
|
|
import shutil
|
|
import numpy as np
|
|
from datetime import datetime
|
|
from pathlib import Path
|
|
from typing import Optional
|
|
|
|
# Module-level logger; its level and format are configured in main() via
# logging.basicConfig.
logger = logging.getLogger(__name__)
def normalize_brightness(frame, mode):
    """Apply the selected brightness-normalization mode to a grayscale frame.

    Supported modes:
      * "hist-norm": min-max stretch, histogram equalization, min-max stretch.
      * "hist":      histogram equalization only.
      * "basic":     min-max stretch only.

    Any other mode (including "none") returns the frame unchanged.
    """

    def stretch(img):
        # Min-max stretch of pixel intensities to the full 0-255 range.
        return cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)

    def equalize(img):
        # Histogram equalization of the single-channel image.
        return cv2.equalizeHist(img)

    # Map each mode to its ordered sequence of processing steps; unknown
    # modes map to the empty pipeline (frame passes through untouched).
    pipelines = {
        "hist-norm": (stretch, equalize, stretch),
        "hist": (equalize,),
        "basic": (stretch,),
    }
    for step in pipelines.get(mode, ()):
        frame = step(frame)
    return frame
def main() -> int:
    """CLI entry point: view (and optionally record) Leap Motion camera feeds.

    Opens the given video device, configures resolution / LEDs / colour
    response via UVC control properties, then loops: read an interleaved
    stereo frame, optionally average it with recent frames, de-interleave it
    into left/right images, normalize brightness, and display until 'q' is
    pressed.

    Returns:
        0 on normal exit, 1 if the video device could not be opened.
    """
    # Handle program arguments
    ap = argparse.ArgumentParser(
        prog="leap-view",
        description="View the camera feeds from a Leap Motion controller",
    )
    ap.add_argument("device", help="Path to the video device")
    ap.add_argument(
        "-c",
        "--camera",
        help="Which camera(s) to display",
        choices=["left", "right", "both", "raw"],
        default="both",
    )
    ap.add_argument(
        "-l",
        "--led",
        help="Which LEDs to enable",
        choices=["left", "centre", "right", "sides", "all", "none"],
        default="all",
    )
    ap.add_argument(
        "-b",
        "--brightness-normalization",
        help="Brightness normalization modes",
        choices=["none", "hist", "hist-norm", "basic"],
        default="hist",
    )
    ap.add_argument("--record", help="Record the video to a file", action="store_true")
    ap.add_argument(
        "--colour-non-linear", help="Enable non-linear colour", action="store_true"
    )
    ap.add_argument(
        "--resolution",
        help="Resolution of the camera",
        choices=["640x120", "640x240", "640x480", "752x120", "752x240", "752x480"],
        default="752x480",
    )
    ap.add_argument(
        "--upscale",
        help="Upscaling factor",
        type=float,
    )
    ap.add_argument(
        "--squish", help="Downscale the image before upscaling", action="store_true"
    )
    ap.add_argument(
        "--average-frames", help="Number of frames to average", type=int, default=0
    )
    ap.add_argument(
        "--average-mode",
        help="Averaging mode",
        choices=["mean", "median", "min", "max"],
        default="mean",
    )
    ap.add_argument(
        "--video-root",
        help="Root directory for video files",
        default="~/Videos/leap-view",
    )
    ap.add_argument(
        "-v", "--verbose", help="Enable verbose logging", action="store_true"
    )
    args = ap.parse_args()

    # Configure logging
    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
        format="%(levelname)s: %(message)s",
    )

    # Properly parse the video root (expand "~" to the user's home directory)
    args.video_root = Path(args.video_root).expanduser()

    # Determine where to save the video (timestamped so runs never collide)
    video_file_name = datetime.now().strftime("LeapMotion-%Y-%m-%d_%H-%M-%S.avi")

    # If we need to record the video
    if args.record:
        video_file_path = args.video_root / video_file_name
        video_file_path.parent.mkdir(parents=True, exist_ok=True)
        logger.info(f"Recording video to {video_file_path}")
        # Writer frame size: both cameras side by side (width * 2 columns),
        # one row shorter than the capture height because the last row of
        # every captured frame is stripped in the read loop below.
        video_output = cv2.VideoWriter(
            str(video_file_path),
            cv2.VideoWriter_fourcc(*"MJPG"),
            30,
            (
                int(args.resolution.split("x")[0]) * 2,
                int(args.resolution.split("x")[1]) - 1,
            ),
            isColor=False,
        )

    # Open the video device
    cap = cv2.VideoCapture(args.device)

    if not cap.isOpened():
        logger.error("Failed to open video device")
        return 1

    # Set the resolution
    width, height = map(int, args.resolution.split("x"))
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    # Disable RGB conversion so reads return the raw interleaved bytes
    cap.set(cv2.CAP_PROP_CONVERT_RGB, 0)

    # Configure the LEDs
    # NOTE: See the libuvc for leap documentation for info about this
    # https://github.com/ewpratten/leapuvc/blob/master/LeapUVC-Manual.pdf
    # The device overloads the UVC "contrast" control: the low bits select an
    # LED (2 = left, 3 = centre, 4 = right) and bit 6 is the on/off flag.
    cap.set(
        cv2.CAP_PROP_CONTRAST, (2 | (int(args.led in ["left", "sides", "all"]) << 6))
    )
    cap.set(cv2.CAP_PROP_CONTRAST, (3 | (int(args.led in ["centre", "all"]) << 6)))
    cap.set(
        cv2.CAP_PROP_CONTRAST, (4 | (int(args.led in ["right", "sides", "all"]) << 6))
    )

    # Set non-linear color mode (gamma control doubles as the toggle)
    cap.set(cv2.CAP_PROP_GAMMA, int(args.colour_non_linear))

    # Allocate average frame buffer (sliding window of the most recent frames)
    average_frame_buf = []

    # Read frames
    while True:
        ret, frame = cap.read()

        if not ret:
            logger.error("Failed to read frame")
            break

        # Check for a key press ('q' quits)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

        # Reshape the frame: the device delivers both cameras' pixels
        # byte-interleaved in one buffer, so each row is width * 2 bytes.
        frame = np.reshape(frame, (height, width * 2))

        # Ignore the last row of pixels
        frame = frame[:-1, :]

        # If we need to be averaging frames
        # (the > 0 check also guards against a negative --average-frames)
        if args.average_frames and args.average_frames > 0:
            average_frame_buf.append(frame)
            if len(average_frame_buf) > args.average_frames:
                average_frame_buf.pop(0)

            # Handle the averaging mode
            if args.average_mode == "mean":
                frame = np.mean(average_frame_buf, axis=0).astype(np.uint8)
            elif args.average_mode == "median":
                frame = np.median(average_frame_buf, axis=0).astype(np.uint8)
            elif args.average_mode == "min":
                frame = np.min(average_frame_buf, axis=0).astype(np.uint8)
            elif args.average_mode == "max":
                frame = np.max(average_frame_buf, axis=0).astype(np.uint8)

        # If asked for a raw frame, show it and continue
        # NOTE(review): this `continue` also skips the --record write below,
        # so raw mode is never recorded — confirm that is intentional.
        if args.camera == "raw":
            frame = normalize_brightness(frame, args.brightness_normalization)
            cv2.imshow("Raw", frame)
            continue

        # Split into left and right frames (every other byte)
        left_frame = frame[:, 0::2]
        right_frame = frame[:, 1::2]

        # Fix brightness issues
        left_frame = normalize_brightness(left_frame, args.brightness_normalization)
        right_frame = normalize_brightness(right_frame, args.brightness_normalization)

        # Average down by a 2x2 square
        if args.squish:
            left_frame = cv2.resize(left_frame, (width // 2, height // 2))
            right_frame = cv2.resize(right_frame, (width // 2, height // 2))

            # Scale back up to display size after the lossy downscale.
            # NOTE(review): this restores `height` rows, but the VideoWriter
            # above expects height - 1 rows — combining --squish with
            # --record may silently drop frames; verify against hardware.
            left_frame = cv2.resize(left_frame, (width, height))
            right_frame = cv2.resize(right_frame, (width, height))

        # If we should be recording the video
        if args.record:
            # Create a new frame that is twice as wide with both images side by side
            video_frame = np.concatenate((left_frame, right_frame), axis=1)

            # Crop to the correct resolution
            video_frame = video_frame[:height, : width * 2]

            # Write the frame to the video
            video_output.write(video_frame)

        # If we need to do upscaling, do it now (display only: this runs
        # after the record step, so the file keeps the native resolution)
        if args.upscale:
            left_frame = cv2.resize(
                left_frame, (int(width * args.upscale), int(height * args.upscale))
            )
            right_frame = cv2.resize(
                right_frame, (int(width * args.upscale), int(height * args.upscale))
            )

        # Show the frame
        if args.camera == "left":
            cv2.imshow("Left", left_frame)
        if args.camera == "right":
            cv2.imshow("Right", right_frame)

        # If we need to show both cameras
        if args.camera == "both":
            frame = np.concatenate((left_frame, right_frame), axis=1)
            cv2.imshow("Both", frame)

    # Clean up
    cap.release()
    cv2.destroyAllWindows()

    if args.record:
        video_output.release()
        logger.info(f"Video saved to {video_file_path}")

    return 0
if __name__ == "__main__":
    # Run the CLI entry point and propagate its status code to the shell.
    sys.exit(main())