[vision] Add some more documentation

This commit is contained in:
Freek van Tienen
2015-03-15 20:25:38 +01:00
parent 263b9eea1d
commit da7f29ceff
8 changed files with 88 additions and 34 deletions
@@ -46,6 +46,9 @@ static void *v4l2_capture_thread(void *data);
* The main capturing thread
* This thread handles the queue and dequeue of buffers, to make sure only the latest
* image buffer is preserved for image processing.
* @param[in] *data The Video 4 Linux 2 device pointer
 * @return 0 on success, -1 if it isn't able to fetch an image,
* -2 on timeout of taking an image, -3 on failing buffer dequeue
*/
static void *v4l2_capture_thread(void *data)
{
@@ -116,12 +119,13 @@ static void *v4l2_capture_thread(void *data)
/**
* Initialize a V4L2 subdevice.
* The subdevice name should be something like '/dev/v4l-subdev0'
* The pad and which indicate the way the subdevice should communicate
* with the real device. Which pad it should take.
* Code should be something like V4L2_MBUS_FMT_UYVY8_2X8. See the V4l2
* manual for available codes.
* Width and height are the amount of pixels this subdevice must cover.
* @param[in] *subdev_name The subdevice name (like /dev/v4l-subdev0)
 * @param[in] pad,which The way the subdevice should communicate and be
* connected to the real device.
* @param[in] code The encoding the subdevice uses (like V4L2_MBUS_FMT_UYVY8_2X8,
* see the V4L2 manual for available encodings)
* @param[in] width,height The width and height of the images
* @return Whether the subdevice was successfully initialized
*/
bool_t v4l2_init_subdev(char *subdev_name, uint8_t pad, uint8_t which, uint16_t code, uint16_t width, uint16_t height)
{
@@ -163,11 +167,12 @@ bool_t v4l2_init_subdev(char *subdev_name, uint8_t pad, uint8_t which, uint16_t
}
/**
* Initialize a V4L2(Video for Linux 2) device
* The device name should be something like "/dev/video1"
* The subdevice name can be empty if there is no subdevice
* The buffer_cnt are the amount of buffers used in memory mapping
* Note that you need to close this device at the end of you program!
* Initialize a V4L2(Video for Linux 2) device.
* Note that the device must be closed with v4l2_close(dev) at the end.
* @param[in] device_name The video device name (like /dev/video1)
* @param[in] width,height The width and height of the images
* @param[in] buffer_cnt The amount of buffers used for mapping
 * @return The newly created V4L2 device
*/
struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height, uint8_t buffers_cnt) {
uint8_t i;
@@ -280,7 +285,7 @@ struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height
* Get the latest image buffer and lock it (Thread safe, BLOCKING)
 * This function blocks until image access is granted. This should not take that long, because
* it is only locked while enqueueing an image.
* Make sure you free the image after processing!
* Make sure you free the image after processing with v4l2_image_free()!
* @param[in] *dev The V4L2 video device we want to get an image from
* @param[out] *img The image that we got from the video device
*/
@@ -316,7 +321,7 @@ void v4l2_image_get(struct v4l2_device *dev, struct image_t *img) {
/**
* Get the latest image and lock it (Thread safe, NON BLOCKING)
* This function returns NULL if it can't get access to the current image.
* Make sure you free the image after processing!
 * Make sure you free the image after processing with v4l2_image_free()!
* @param[in] *dev The V4L2 video device we want to get an image from
* @param[out] *img The image that we got from the video device
* @return Whether we got an image or not
@@ -351,6 +356,8 @@ bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img) {
/**
* Free the image and enqueue the buffer (Thread safe)
* This must be done after processing the image, because else all buffers are locked
* @param[in] *dev The video for linux device which the image is from
* @param[in] *img The image to free
*/
void v4l2_image_free(struct v4l2_device *dev, struct image_t *img)
{
@@ -368,8 +375,10 @@ void v4l2_image_free(struct v4l2_device *dev, struct image_t *img)
/**
* Start capturing images in streaming mode (Thread safe)
* Returns true when successfully started capturing. Not that it also returns
* FALSE when it already is in capturing mode.
* @param[in] *dev The video for linux device to start capturing from
 * @return It returns TRUE if it successfully started capture,
 *         but keep in mind that if it is already started it will
 *         return FALSE.
*/
bool_t v4l2_start_capture(struct v4l2_device *dev)
{
@@ -425,9 +434,10 @@ bool_t v4l2_start_capture(struct v4l2_device *dev)
/**
* Stop capturing of the image stream (Thread safe)
* Returns TRUE if it successfully stopped capturing. Note that it also returns FALSE
* when the capturing is already stopped. This function is blocking until capturing
* thread is closed.
* This function is blocking until capturing thread is closed.
* @param[in] *dev The video for linux device to stop capturing
* @return TRUE if it successfully stopped capturing. Note that it also returns FALSE
* when the capturing is already stopped.
*/
bool_t v4l2_stop_capture(struct v4l2_device *dev)
{
@@ -462,6 +472,7 @@ bool_t v4l2_stop_capture(struct v4l2_device *dev)
* Close the V4L2 device (Thread safe)
 * This needs to be performed to clean up all the buffers and close the device.
* Note that this also stops the capturing if it is still capturing.
* @param[in] *dev The video for linux device to close(cleanup)
*/
void v4l2_close(struct v4l2_device *dev)
{
@@ -391,3 +391,28 @@ int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct imag
return sum;
}
/**
 * Show points in an image by coloring them through giving
 * the pixels the maximum value.
 * @param[in,out] *img The image to place the points on
 * @param[in] *points The points to show
 * @param[in] points_cnt The amount of points to show
 */
void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt)
{
  uint8_t *img_buf = (uint8_t *)img->buf;
  uint8_t pixel_width = (img->type == IMAGE_YUV422)? 2 : 1;
  // Go through all points and set their pixel to the maximum value (255)
  for(int i = 0; i < points_cnt; i++) {
    uint32_t idx = pixel_width*points[i].y*img->w + points[i].x*pixel_width;
    img_buf[idx] = 255;
    // YUV422 uses 2 bytes per pixel, so also set the second byte
    // (presumably the luma component — TODO confirm byte order)
    if(img->type == IMAGE_YUV422) {
      idx++;
      img_buf[idx] = 255;
    }
  }
}
@@ -68,5 +68,6 @@ void image_gradients(struct image_t *input, struct image_t *dx, struct image_t *
void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g);
uint32_t image_difference(struct image_t *img_a, struct image_t *img_b, struct image_t *diff);
int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct image_t *mult);
void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt);
#endif
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2014
* Copyright (C) 2014 G. de Croon
* 2015 Freek van Tienen <freek.v.tienen@gmail.com>
*
* This file is part of Paparazzi.
*
@@ -19,8 +20,8 @@
*/
/**
* @file modules/computer_vision/cv/opticflow/lucas_kanade.c
* @brief efficient fixed-point optical-flow
* @file modules/computer_vision/lib/vision/lucas_kanade.c
* @brief efficient fixed-point optical-flow calculation
*
* - Initial fixed-point C implementation by G. de Croon
* - Algorithm: Lucas-Kanade by Yves Bouguet
@@ -34,6 +35,20 @@
#include "lucas_kanade.h"
/**
* Compute the optical flow of several points using the Lucas-Kanade algorithm by Yves Bouguet
 * The initial fixed-point implementation is done by G. de Croon and is adapted by
* Freek van Tienen for the implementation in Paparazzi.
* @param[in] *new_img The newest grayscale image (TODO: fix YUV422 support)
* @param[in] *old_img The old grayscale image (TODO: fix YUV422 support)
* @param[in] *points Points to start tracking from
* @param[in] points_cnt The amount of points
* @param[out] *new_points The new locations of the points
* @param[out] *status Whether the point was tracked or not
* @param[in] half_window_size Half the window size (in both x and y direction) to search inside
* @param[in] max_iteration Maximum amount of iterations to find the new point
* @param[in] step_threshold The threshold at which the iterations should stop
*/
void opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t points_cnt,
struct point_t *new_points, bool_t *status, uint16_t half_window_size, uint8_t max_iterations, uint8_t step_threshold)
{
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2014
* Copyright (C) 2014 G. de Croon
* 2015 Freek van Tienen <freek.v.tienen@gmail.com>
*
* This file is part of Paparazzi.
*
@@ -19,9 +20,12 @@
*/
/**
* @file modules/computer_vision/cv/opticflow/lucas_kanade.h
* @brief efficient fixed-point optical-flow
 * @file modules/computer_vision/lib/vision/lucas_kanade.h
* @brief efficient fixed-point optical-flow calculation
*
* - Initial fixed-point C implementation by G. de Croon
* - Algorithm: Lucas-Kanade by Yves Bouguet
* - Publication: http://robots.stanford.edu/cs223b04/algo_tracking.pdf
*/
#ifndef OPTIC_FLOW_INT_H
@@ -106,15 +106,9 @@ void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_
  // FAST corner detection (TODO: non-fixed threshold)
struct point_t *fast9_points = fast9_detect(img, 20, 5, &result->corner_cnt);
/*image_to_grayscale(img, img);
uint8_t *im = (uint8_t *)img->buf;
for(int i = 0; i < result->corner_cnt; i++) {
uint32_t idx = 2*fast9_points[i].y*opticflow->img_w + fast9_points[i].x*2;
im[idx] = 255;
idx = idx+1 % (opticflow->img_w*opticflow->img_h*2);
im[idx] = 255;
}*/
#if OPTICFLOW_SHOW_CORNERS
image_show_points(img, fast9_points, result->corner_cnt);
#endif
// *************************************************************************************
// Corner Tracking
@@ -115,6 +115,7 @@ void guidance_h_module_read_rc(void)
/**
* Main guidance loop
* @param[in] in_flight Whether we are in flight or not
*/
void guidance_h_module_run(bool_t in_flight)
{
@@ -124,8 +125,9 @@ void guidance_h_module_run(bool_t in_flight)
/**
* Update the controls based on a vision result
* @param[in] *result The opticflow calculation result used for control
*/
void stabilization_opticflow_update(struct opticflow_result_t* result)
void stabilization_opticflow_update(struct opticflow_result_t *result)
{
// *************************************************************************************
// Downlink Message
@@ -193,6 +193,8 @@ static void *opticflow_module_calc(void *data __attribute__((unused))) {
/**
* Get the altitude above ground of the drone
* @param[in] sender_id The id that send the ABI message (unused)
* @param[in] distance The distance above ground level in meters
*/
static void opticflow_agl_cb(uint8_t sender_id __attribute__((unused)), float distance)
{