Merge pull request #1143 from paparazzi/opticflow

[vision] Opticflow rewrite: 
-less grabbing threads
-faster grabbing
-static linking issues fixed properly
-documentation
-rewritten code
This commit is contained in:
Christophe De Wagter
2015-04-09 11:47:38 +02:00
49 changed files with 5570 additions and 9624 deletions
+3 -3
View File
@@ -39,12 +39,12 @@ else
FLOAT_ABI ?= -mfloat-abi=softfp -mfpu=vfp
endif
ARCH_FLAGS ?= -mtune=cortex-a8 -march=armv7-a
ARCH_CFLAGS ?= -mtune=cortex-a8 -march=armv7-a
# add ARM specific flags to CFLAGS, LDFLAGS
CFLAGS += $(FLOAT_ABI) $(ARCH_FLAGS)
CFLAGS += $(FLOAT_ABI) $(ARCH_CFLAGS)
LDFLAGS += $(FLOAT_ABI)
CXXFLAGS += $(FLOAT_ABI) $(ARCH_FLAGS)
CXXFLAGS += $(FLOAT_ABI) $(ARCH_CFLAGS)
# include the common linux Makefile (common CFLAGS, actual targets)
include $(PAPARAZZI_SRC)/conf/Makefile.linux
+4
View File
@@ -42,6 +42,10 @@ $(TARGET).CFLAGS +=-DARDRONE2_RAW
# handle linux signals by hand
$(TARGET).CFLAGS += -DUSE_LINUX_SIGNAL
# Link static (Done for GLIBC)
$(TARGET).CFLAGS += -DLINUX_LINK_STATIC
$(TARGET).LDFLAGS += -static
# -----------------------------------------------------------------------
# default LED configuration
+4
View File
@@ -34,6 +34,10 @@ $(TARGET).CFLAGS += -DUSE_LINUX_SIGNAL
# Compile the video specific parts
$(TARGET).srcs += $(SRC_BOARD)/video.c
# Link static (Done for GLIBC)
$(TARGET).CFLAGS += -DLINUX_LINK_STATIC
$(TARGET).LDFLAGS += -static
# -----------------------------------------------------------------------
# default LED configuration
+14 -23
View File
@@ -1972,31 +1972,22 @@
<field name="t" type="uint32"/>
</message>
<message name="OF_HOVER" id="228">
<field name="FPS" type="float"/>
<field name="dx" type="float"/>
<field name="dy" type="float"/>
<field name="dx_trans" type="float"/>
<field name="dy_trans" type="float"/>
<field name="diff_roll" type="float"/>
<field name="diff_pitch" type="float"/>
<field name="velx" type="float"/>
<field name="vely" type="float"/>
<field name="velx_Ned" type="float"/>
<field name="vely_Ned" type="float"/>
<field name="z_sonar" type="float"/>
<field name="count" type="int32"/>
</message>
<message name="VISION_STABILIZATION" id="229">
<field name="velx" type="float"/>
<field name="vely" type="float"/>
<field name="velx_i" type="float"/>
<field name="vely_i" type="float"/>
<field name="cmd_phi" type="int32"/>
<field name="cmd_theta" type="int32"/>
<message name="OPTIC_FLOW_EST" id="228">
<field name="fps" type="float"/>
<field name="corner_cnt" type="uint16"/>
<field name="tracked_cnt" type="uint16"/>
<field name="flow_x" type="int16" unit="subpixels"/>
<field name="flow_y" type="int16" unit="subpixels"/>
<field name="flow_der_x" type="int16" unit="subpixels"/>
<field name="flow_der_y" type="int16" unit="subpixels"/>
<field name="vel_x" type="float" unit="cm/s"/>
<field name="vel_y" type="float" unit="cm/s"/>
<field name="cmd_phi" type="int32" alt_unit="deg" alt_unit_coef="0.0139882"/>
<field name="cmd_theta" type="int32" alt_unit="deg" alt_unit_coef="0.0139882"/>
</message>
<!--229 is free -->
<message name="AHRS_ARDRONE2" id="230">
<field name="state" type="uint32" />
<field name="control_state" type="uint32" values="DEFAULT|INIT|LANDING|FLYING|HOVERING|TEST|TRANS_TAKEOFF|TRANS_GOTOFIX|TRANS_LANDING|TRANS_LOOPING"/>
+101 -44
View File
@@ -3,38 +3,78 @@
<module name="cv_opticflow" dir="computer_vision">
<doc>
<description>
Compute Optic Flow from Ardrone2 Bottom Camera
Computes Pitch- and rollrate corrected optic flow from downward looking
ARDrone2 camera looking at a textured floor.
Hovers the drone based on optical flow made for Linux video Devices.
Computes Pitch- and roll attitude from downward looking camera looking at a textured floor.
- Sonar is required.
- Controller can hold position
</description>
<!-- Stabilization parameters and gains -->
<section name="VISION" prefix="VISION_">
<define name="HOVER" value="FALSE" description="TRUE/FALSE active or not"/>
<define name="PHI_PGAIN" value="500" description="optic flow pgain"/>
<define name="PHI_IGAIN" value="10" description="optic flow igain"/>
<define name="THETA_PGAIN" value="500" description="optic flow pgain"/>
<define name="THETA_IGAIN" value="10" description="optic flow igain"/>
<define name="DESIRED_VX" value="0" description="feedforward optic flow vx"/>
<define name="DESIRED_VY" value="0" description="feedforward optic flow vy"/>
<define name="PHI_PGAIN" value="400" description="Optic flow proportional gain on the roll velocity error"/>
<define name="PHI_IGAIN" value="20" description="Optic flow integrated gain on the summed roll velocity error"/>
<define name="THETA_PGAIN" value="400" description="Optic flow proportional gain on the pitch velocity error"/>
<define name="THETA_IGAIN" value="20" description="Optic flow integrated gain on the summed pitch velocity error"/>
<define name="DESIRED_VX" value="0" description="The desired velocity in the body frame x direction"/>
<define name="DESIRED_VY" value="0" description="The desired velocity in the body frame y direction"/>
</section>
<!-- Optical flow calculation parameters -->
<section name="OPTICFLOW" prefix="OPTICFLOW_">
<define name="AGL_ID" value="ABI_SENDER_ID" description="ABI sender id for AGL message (sonar measurement) (default: ABI_BROADCAST)"/>
<!-- Video device parameters -->
<define name="DEVICE" value="/dev/video2" description="The V4L2 camera device that is used for the calculations"/>
<define name="DEVICE_SIZE" value="320,240" description="The V4L2 camera device width and height"/>
<define name="DEVICE_BUFFERS" value="15" description="Amount of V4L2 video buffers"/>
<define name="SUBDEV" description="The V4L2 subdevice to initialize before the main device"/>
<!-- Camera parameters -->
<define name="FOV_W" value="0.89360857702" description="The field of view width of the bottom camera (Defaults are from an ARDrone 2)"/>
<define name="FOV_H" value="0.67020643276" description="The field of view height of the bottom camera (Defaults are from an ARDrone 2)"/>
<define name="FX" value="343.1211" description="Field in the x direction of the camera (Defaults are from an ARDrone 2)"/>
<define name="FY" value="348.5053" description="Field in the y direction of the camera (Defaults are from an ARDrone 2)"/>
<!-- Lucas Kanade optical flow calculation parameters -->
<define name="MAX_TRACK_CORNERS" value="25" description="The maximum amount of corners the Lucas Kanade algorithm is tracking between two frames"/>
<define name="WINDOW_SIZE" value="10" description="Window size used in Lucas Kanade algorithm"/>
<define name="SUBPIXEL_FACTOR" value="10" description="Amount of subpixels per pixel, used for more precise (subpixel) calculations of the flow"/>
<define name="MAX_ITERATIONS" value="10" description="Maximum number of iterations the Lucas Kanade algorithm should take"/>
<define name="THRESHOLD_VEC" value="2" description="Threshold in subpixels when the iterations of Lucas Kanade should stop"/>
<!-- FAST9 corner detection parameters -->
<define name="FAST9_ADAPTIVE" value="TRUE" description="Whether we should use an adaptive FAST9 corner detection threshold"/>
<define name="FAST9_THRESHOLD" value="20" description="FAST9 default threshold"/>
<define name="FAST9_MIN_DISTANCE" value="10" description="The amount of pixels between corners that should be detected"/>
</section>
<define name="DOWNLINK_VIDEO" value="FALSE" description="Also stream video: warning: this makes the optic flow slow: DEBUGGING only" />
<define name="OPTICFLOW_AGL_ID" value="ABI_SENDER_ID" description="ABI sender id for AGL message (sonar measurement) (default: ABI_BROADCAST)"/>
</doc>
<settings>
<dl_settings>
<dl_settings NAME="Vision Loop">
<dl_setting var="activate_opticflow_hover" min="0" step="1" max="1" module="computer_vision/opticflow/hover_stabilization" shortname="hover" param="VISION_HOVER" values="FALSE|TRUE"/>
<dl_setting var="vision_phi_pgain" min="0" step="1" max="10000" shortname="kp_v_phi" param="VISION_PHI_PGAIN"/>
<dl_setting var="vision_phi_igain" min="0" step="1" max="1000" shortname="ki_v_phi" param="VISION_PHI_IGAIN"/>
<dl_setting var="vision_theta_pgain" min="0" step="1" max="10000" shortname="kp_v_theta" param="VISION_THETA_PGAIN"/>
<dl_setting var="vision_theta_igain" min="0" step="1" max="1000" shortname="ki_v_theta" param="VISION_THETA_IGAIN"/>
<dl_setting var="vision_desired_vx" min="-5" step="0.01" max="5" shortname="desired_vx" param="VISION_DESIRED_VX"/>
<dl_setting var="vision_desired_vy" min="-5" step="0.01" max="5" shortname="desired_vy" param="VISION_DESIRED_VY"/>
</dl_settings> </dl_settings>
<dl_settings NAME="Vision stabilization">
<!-- Stabilization loop parameters and gains -->
<dl_settings name="vision_stab">
<dl_setting var="opticflow_stab.phi_pgain" module="computer_vision/opticflow_module" min="0" step="1" max="10000" shortname="kp_v_phi" param="VISION_PHI_PGAIN"/>
<dl_setting var="opticflow_stab.phi_igain" module="computer_vision/opticflow_module" min="0" step="1" max="1000" shortname="ki_v_phi" param="VISION_PHI_IGAIN"/>
<dl_setting var="opticflow_stab.theta_pgain" module="computer_vision/opticflow_module" min="0" step="1" max="10000" shortname="kp_v_theta" param="VISION_THETA_PGAIN"/>
<dl_setting var="opticflow_stab.theta_igain" module="computer_vision/opticflow_module" min="0" step="1" max="1000" shortname="ki_v_theta" param="VISION_THETA_IGAIN"/>
<dl_setting var="opticflow_stab.desired_vx" module="computer_vision/opticflow_module" min="-5" step="0.01" max="5" shortname="desired_vx" param="VISION_DESIRED_VX"/>
<dl_setting var="opticflow_stab.desired_vy" module="computer_vision/opticflow_module" min="-5" step="0.01" max="5" shortname="desired_vy" param="VISION_DESIRED_VY"/>
</dl_settings>
<!-- Optical flow calculations parameters for Lucas Kanade and FAST9 -->
<dl_settings name="vision_calc">
<dl_setting var="opticflow.max_track_corners" module="computer_vision/opticflow_module" min="0" step="1" max="500" shortname="max_trck_corners" param="OPTICFLOW_MAX_TRACK_CORNERS"/>
<dl_setting var="opticflow.window_size" module="computer_vision/opticflow_module" min="0" step="1" max="500" shortname="window_size" param="OPTICFLOW_WINDOW_SIZE"/>
<dl_setting var="opticflow.subpixel_factor" module="computer_vision/opticflow_module" min="0" step="1" max="100" shortname="subpixel_factor" param="OPTICFLOW_SUBPIXEL_FACTOR"/>
<dl_setting var="opticflow.max_iterations" module="computer_vision/opticflow_module" min="0" step="1" max="100" shortname="max_iterations" param="OPTICFLOW_MAX_ITERATIONS"/>
<dl_setting var="opticflow.threshold_vec" module="computer_vision/opticflow_module" min="0" step="1" max="100" shortname="threshold_vec" param="OPTICFLOW_THRESHOLD_VEC"/>
<dl_setting var="opticflow.fast9_adaptive" module="computer_vision/opticflow_module" min="0" step="1" max="1" values="TRUE|FALSE" shortname="fast9_adaptive" param="OPTICFLOW_FAST9_ADAPTIVE"/>
<dl_setting var="opticflow.fast9_threshold" module="computer_vision/opticflow_module" min="0" step="1" max="255" shortname="fast9_threshold" param="OPTICFLOW_FAST9_THRESHOLD"/>
<dl_setting var="opticflow.fast9_min_distance" module="computer_vision/opticflow_module" min="0" step="1" max="500" shortname="fast9_min_distance" param="OPTICFLOW_FAST9_MIN_DISTANCE"/>
</dl_settings>
</dl_settings>
</settings>
<header>
@@ -42,31 +82,48 @@
</header>
<init fun="opticflow_module_init()"/>
<periodic fun="opticflow_module_run()" start="opticflow_module_start()" stop="opticflow_module_stop()" autorun="TRUE"/>
<makefile target="ap">
<define name="ARDRONE_VIDEO_PORT" value="2002" />
<file name="opticflow_module.c"/>
<file name="opticflow_thread.c" dir="modules/computer_vision/opticflow"/>
<file name="visual_estimator.c" dir="modules/computer_vision/opticflow"/>
<file name="hover_stabilization.c" dir="modules/computer_vision/opticflow"/>
<file name="optic_flow_int.c" dir="modules/computer_vision/cv/opticflow"/>
<file name="fastRosten.c" dir="modules/computer_vision/cv/opticflow/fast9"/>
<file name="trig.c" dir="modules/computer_vision/cv"/>
<file name="framerate.c" dir="modules/computer_vision/cv"/>
<file name="jpeg.c" dir="modules/computer_vision/cv/encoding"/>
<file name="rtp.c" dir="modules/computer_vision/cv/encoding"/>
<file name="socket.c" dir="modules/computer_vision/lib/udp"/>
<makefile target="ap">
<!-- Include the needed Computer Vision files -->
<define name="modules/computer_vision" type="include"/>
<file name="image.c" dir="modules/computer_vision/lib/vision"/>
<file name="jpeg.c" dir="modules/computer_vision/lib/encoding"/>
<file name="rtp.c" dir="modules/computer_vision/lib/encoding"/>
<file name="v4l2.c" dir="modules/computer_vision/lib/v4l"/>
<define name="modules/computer_vision/cv" type="include"/>
<define name="modules/computer_vision/lib" type="include"/>
<define name="pthread" type="raw"/>
<define name="__USE_GNU"/>
<flag name="LDFLAGS" value="pthread"/>
<flag name="LDFLAGS" value="lrt"/>
<flag name="LDFLAGS" value="static"/>
<!-- The optical flow module (calculator+stabilization) -->
<file name="opticflow_module.c"/>
<file name="opticflow_calculator.c" dir="modules/computer_vision/opticflow"/>
<file name="stabilization_opticflow.c" dir="modules/computer_vision/opticflow"/>
<!-- Main vision calculations -->
<file name="fast_rosten.c" dir="modules/computer_vision/lib/vision"/>
<file name="lucas_kanade.c" dir="modules/computer_vision/lib/vision"/>
<raw>
VIEWVIDEO_DEV ?= UDP1
VIEWVIDEO_HOST ?= $(MODEM_HOST)
VIEWVIDEO_PORT_OUT ?= 5000
VIEWVIDEO_PORT_IN ?= 4999
VIEWVIDEO_BROADCAST ?= $(MODEM_BROADCAST)
VIEWVIDEO_DEV_LOWER = $(shell echo $(VIEWVIDEO_DEV) | tr A-Z a-z)
VIEWVID_G_CFLAGS = -DVIEWVIDEO_HOST=\"$(VIEWVIDEO_HOST)\" -DVIEWVIDEO_PORT_OUT=$(VIEWVIDEO_PORT_OUT)
VIEWVID_CFLAGS = -DUSE_$(VIEWVIDEO_DEV) -DVIEWVIDEO_DEV=$(VIEWVIDEO_DEV_LOWER)
VIEWVID_CFLAGS += -D$(VIEWVIDEO_DEV)_PORT_OUT=$(VIEWVIDEO_PORT_OUT) -D$(VIEWVIDEO_DEV)_PORT_IN=$(VIEWVIDEO_PORT_IN)
VIEWVID_CFLAGS += -D$(VIEWVIDEO_DEV)_BROADCAST=$(VIEWVIDEO_BROADCAST) -D$(VIEWVIDEO_DEV)_HOST=\"$(VIEWVIDEO_HOST)\"
ifeq ($(VIEWVIDEO_USE_NC),)
ap.CFLAGS += $(VIEWVID_G_CFLAGS) $(VIEWVID_CFLAGS)
else
ap.CFLAGS += $(VIEWVID_G_CFLAGS) -DVIEWVIDEO_USE_NC
endif
ap.CFLAGS += -DGUIDANCE_V_MODE_MODULE_SETTING=GUIDANCE_V_MODE_HOVER
ap.CFLAGS += -DGUIDANCE_H_MODE_MODULE_SETTING=GUIDANCE_H_MODE_MODULE
</raw>
</makefile>
<makefile target="nps">
<file name="viewvideo_nps.c"/>
</makefile>
+3 -2
View File
@@ -37,8 +37,9 @@
<!-- Include the needed Computer Vision files -->
<define name="modules/computer_vision" type="include"/>
<file name="jpeg.c" dir="modules/computer_vision/cv/encoding"/>
<file name="rtp.c" dir="modules/computer_vision/cv/encoding"/>
<file name="image.c" dir="modules/computer_vision/lib/vision"/>
<file name="jpeg.c" dir="modules/computer_vision/lib/encoding"/>
<file name="rtp.c" dir="modules/computer_vision/lib/encoding"/>
<file name="v4l2.c" dir="modules/computer_vision/lib/v4l"/>
<!-- Define the network connection to send images over -->
+1
View File
@@ -21,6 +21,7 @@
<message name="UART_ERRORS" period="3.1"/>
<message name="DATALINK_REPORT" period="5.1"/>
<message name="STATE_FILTER_STATUS" period="3.2"/>
<message name="OPTIC_FLOW_EST" period="0.25"/>
</mode>
<mode name="ppm">
+9 -4
View File
@@ -40,7 +40,7 @@
/**
* Create UDP socket and bind it.
* @param[out] sock pointer to already allocated UdpSocket struct
* @param[in] host hostname/address
* @param[in] host ip address or hostname (hostname not possible if static linking)
* @param[in] port_out output port
* @param[in] port_in input port (set to < 0 to disable)
* @param[in] broadcast if TRUE enable broadcasting
@@ -52,6 +52,7 @@ int udp_socket_create(struct UdpSocket *sock, char *host, int port_out, int port
return -1;
}
#ifndef LINUX_LINK_STATIC
/* try to convert host ipv4 address to binary format */
struct in_addr host_ip;
if (!inet_aton(host, &host_ip)) {
@@ -66,11 +67,11 @@ int udp_socket_create(struct UdpSocket *sock, char *host, int port_out, int port
if (hp->h_addrtype == AF_INET && hp->h_length == 4) {
/* simply use first address */
memcpy(&host_ip.s_addr, hp->h_addr_list[0], hp->h_length);
}
else {
} else {
return -1;
}
}
#endif
// Create the socket with the correct protocol
sock->sockfd = socket(PF_INET, SOCK_DGRAM, 0);
@@ -96,7 +97,11 @@ int udp_socket_create(struct UdpSocket *sock, char *host, int port_out, int port
// set the output/destination address for use in sendto later
sock->addr_out.sin_family = PF_INET;
sock->addr_out.sin_port = htons(port_out);
#ifndef LINUX_LINK_STATIC
sock->addr_out.sin_addr.s_addr = host_ip.s_addr;
#else
sock->addr_out.sin_addr.s_addr = inet_addr(host);
#endif
return 0;
}
@@ -114,7 +119,7 @@ int udp_socket_send(struct UdpSocket *sock, uint8_t *buffer, uint16_t len)
}
ssize_t bytes_sent = sendto(sock->sockfd, buffer, len, 0,
(struct sockaddr *)&sock->addr_out, sizeof(sock->addr_out));
(struct sockaddr *)&sock->addr_out, sizeof(sock->addr_out));
if (bytes_sent != len) {
TRACE(TRACE_ERROR, "error sending to sock %d (%d)\n", (int)bytes_sent, strerror(errno));
}
+2 -3
View File
@@ -284,10 +284,10 @@ static void *navdata_read(void *data __attribute__((unused)))
if (pint != NULL) {
memmove(navdata_buffer, pint, NAVDATA_PACKET_SIZE - (pint - navdata_buffer));
buffer_idx = pint - navdata_buffer;
fprintf(stderr, "[navdata] sync error, startbyte not found, resetting...\n");
} else {
buffer_idx = 0;
}
fprintf(stderr, "[navdata] sync error, startbyte not found, resetting...\n");
continue;
}
@@ -379,8 +379,7 @@ void navdata_update()
navdata.imu_available = TRUE;
navdata.packetsRead++;
}
else {
} else {
// no new packet available, still unlock mutex again
pthread_mutex_unlock(&navdata_mutex);
}
-33
View File
@@ -144,36 +144,3 @@ static uint8_t actuators_bebop_checksum(uint8_t *bytes, uint8_t size)
return checksum;
}
/*static void actuators_bebop_saturate(void) {
// Find the lowest and highest commands
int32_t max_cmd = 9000; // Should be gotten from airframe file per motor
int32_t min_cmd = 3000; // Should be gotten from airframe file per motor
for(int i = 0; i < 4; i++) {
if(actuators_bebop.rpm_ref[i] > max_cmd)
max_cmd = actuators_bebop.rpm_ref[i];
if(actuators_bebop.rpm_ref[i] < min_cmd)
min_cmd = actuators_bebop.rpm_ref[i];
}
// Find the maximum motor command (Saturated motor or either MOTOR_MIXING_MAX_MOTOR)
int32_t max_motor = 9000;
for(int i = 0; i < 4; i++) {
if(actuators_bebop.rpm_obs[i] & (1<<15) && max_cmd > (actuators_bebop.rpm_obs[i] & ~(1<<15)))
max_motor = actuators_bebop.rpm_obs[i] & ~(1<<15);
}
// Saturate the offsets
if(max_cmd > max_motor) {
int32_t saturation_offset = 9000 - max_cmd;
for(int i = 0; i < 4; i++)
actuators_bebop.rpm_ref[i] += saturation_offset;
motor_mixing.nb_saturation++;
}
else if(min_cmd < 3000) {
int32_t saturation_offset = 3000 - min_cmd;
for(int i = 0; i < 4; i++)
actuators_bebop.rpm_ref[i] += saturation_offset;
motor_mixing.nb_saturation++;
}
}*/
@@ -1,88 +0,0 @@
/*
* Copyright (C) 2012-2013
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include <stdint.h>
#include "image.h"
inline void grayscale_uyvy(struct img_struct *input, struct img_struct *output);

/**
 * Strip the chroma out of a UYVY image: for every pixel, write a neutral
 * chroma byte (127) followed by the pixel's luminance byte taken from the
 * input stream (U0 Y0 V0 Y1 ...), i.e. two output bytes per pixel.
 * Output dimensions (output->w, output->h) drive the loop.
 */
inline void grayscale_uyvy(struct img_struct *input, struct img_struct *output)
{
  uint8_t *src = input->buf + 1;          /* start at the first Y byte */
  uint8_t *dst = output->buf;
  int n_pixels = output->h * output->w;
  for (int i = 0; i < n_pixels; i++) {
    *dst++ = 127;                          /* neutral chroma */
    *dst++ = *src;                         /* keep luminance */
    src += 2;                              /* advance to the next Y byte */
  }
}
inline int colorfilt_uyvy(struct img_struct *input, struct img_struct *output, uint8_t y_m, uint8_t y_M, uint8_t u_m,
                          uint8_t u_M, uint8_t v_m, uint8_t v_M);

/**
 * Color filter on a UYVY image: pixels whose (Y, U, V) bytes fall inside the
 * inclusive ranges [y_m..y_M], [u_m..u_M], [v_m..v_M] are highlighted
 * (U = 64, V = 255, luminance kept); all other pixel pairs get neutral
 * chroma (127) with luminance kept. Processes two pixels (4 bytes) per step.
 *
 * NOTE(review): the range check reads dest[] BEFORE dest is written, so the
 * decision is taken on the current contents of the output buffer. This only
 * makes sense if input and output alias the same buffer (in-place filtering)
 * or dest was pre-filled with the image — confirm with the callers.
 *
 * @return number of pixel pairs that matched the filter
 */
inline int colorfilt_uyvy(struct img_struct *input, struct img_struct *output, uint8_t y_m, uint8_t y_M, uint8_t u_m,
                          uint8_t u_M, uint8_t v_m, uint8_t v_M)
{
  int cnt = 0;
  uint8_t *source = input->buf;
  uint8_t *dest = output->buf;
  for (int y = 0; y < output->h; y++) {
    for (int x = 0; x < output->w; x += 2) {
      // Color Check on the current output bytes (see NOTE above):
      if (
        // Light
        (dest[1] >= y_m)
        && (dest[1] <= y_M)
        && (dest[0] >= u_m)
        && (dest[0] <= u_M)
        && (dest[2] >= v_m)
        && (dest[2] <= v_M)
      ) { // && (dest[2] > 128))
        cnt ++;
        // Highlight matched pair: force chroma, keep luminance. UYVY order.
        dest[0] = 64;        // U
        dest[1] = source[1]; // Y
        dest[2] = 255;       // V
        dest[3] = source[3]; // Y
      } else {
        // Non-matching pair: neutral chroma, keep luminance. UYVY order.
        // NOTE(review): 'u' is computed below but never stored anywhere —
        // looks like dead code left over from a chroma-attenuation attempt.
        char u = source[0] - 127;
        u /= 4;
        dest[0] = 127;       // U
        dest[1] = source[1]; // Y
        u = source[2] - 127;
        u /= 4;
        dest[2] = 127;       // V
        dest[3] = source[3]; // Y
      }
      dest += 4;
      source += 4;
    }
  }
  return cnt;
}
@@ -1,76 +0,0 @@
/*
* Copyright (C) 2015 The Paparazzi Community
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/cv/framerate.c
*
*/
#include "std.h"
#include "framerate.h"
// Frame Rate (FPS)
#include <sys/time.h>
// File-local timer state shared by the helpers below (single-threaded use).
volatile long timestamp;     // last measured interval in microseconds
struct timeval start_time;   // interval start, written by start_timer()
struct timeval end_time;     // interval end, written by end_timer()
#define USEC_PER_SEC 1000000L

/** Microseconds elapsed from *t1 to *t2 (t2 is assumed not earlier than t1). */
static long time_elapsed(struct timeval *t1, struct timeval *t2)
{
  long d_sec  = t2->tv_sec - t1->tv_sec;
  long d_usec = t2->tv_usec - t1->tv_usec;
  /* d_usec may be negative; the combined total is still correct, which is
   * algebraically identical to the usual borrow-from-seconds normalization. */
  return d_sec * USEC_PER_SEC + d_usec;
}
/* Record the current wall-clock time in start_time as the interval start. */
static void start_timer(void)
{
  gettimeofday(&start_time, NULL);
}
/* Capture the interval end and return microseconds since start_timer(). */
static long end_timer(void)
{
  gettimeofday(&end_time, NULL);
  return time_elapsed(&start_time, &end_time);
}
/* Reset the FPS measurement: clear the last interval and start the timer. */
void framerate_init(void) {
  // Frame Rate Initialization
  timestamp = 0;
  start_timer();
}
/**
 * Return the frame rate in FPS since the previous call (or framerate_init),
 * computed as 1e6 / elapsed-microseconds, and restart the timer.
 * NOTE(review): if two calls land within the same microsecond, timestamp is 0
 * and the division yields +inf — confirm callers tolerate this.
 */
float framerate_run(void) {
  // FPS
  timestamp = end_timer();
  float framerate_FPS = (float) 1000000 / (float)timestamp;
  // printf("dt = %d, FPS = %f\n",timestamp, FPS);
  start_timer();
  return framerate_FPS;
}
@@ -1,28 +0,0 @@
/*
* Copyright (C) 2015 The Paparazzi Community
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/cv/framerate.h
*
*/
void framerate_init(void);
float framerate_run(void);
@@ -1,35 +0,0 @@
/*
* Copyright (C) 2012-2013
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef _MY_IMAGE_HEADER_
#define _MY_IMAGE_HEADER_
/* NOTE(review): guard name begins with '_' + uppercase, a reserved
 * identifier pattern — consider renaming (cannot change here). */

/** Minimal image buffer descriptor used by the computer-vision helpers. */
struct img_struct {
  int seq;              /* frame sequence number */
  double timestamp;     /* capture time; units/epoch not shown here — confirm with the grabber */
  unsigned char *buf;   /* raw pixel data (UYVY in the functions using this struct) */
  int w;                /* image width in pixels */
  int h;                /* image height in pixels */
};
#endif
@@ -1,30 +0,0 @@
Copyright(c) 2006, 2008 Edward Rosten
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
*Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
*Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and / or other materials provided with the distribution.
*Neither the name of the University of Cambridge nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -1,45 +0,0 @@
/*
* Copyright (C) 2014
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/cv/opticflow/optic_flow_int.h
* @brief efficient fixed-point optical-flow
*
*/
#ifndef OPTIC_FLOW_INT_H
#define OPTIC_FLOW_INT_H
void multiplyImages(int *ImA, int *ImB, int *ImC, int width, int height);
void getImageDifference(int *ImA, int *ImB, int *ImC, int width, int height);
void getSubPixel_gray(int *Patch, unsigned char *frame_buf, int center_x, int center_y, int half_window_size,
int subpixel_factor);
void getGradientPatch(int *Patch, int *DX, int *DY, int half_window_size);
int getSumPatch(int *Patch, int size);
int calculateG(int *G, int *DX, int *DY, int half_window_size);
int calculateError(int *ImC, int width, int height);
int opticFlowLK(unsigned char *new_image_buf, unsigned char *old_image_buf, int *p_x, int *p_y, int n_found_points,
int imW, int imH, int *new_x, int *new_y, int *status, int half_window_size, int max_iterations);
void quick_sort(float *a, int n);
void quick_sort_int(int *a, int n);
void CvtYUYV2Gray(unsigned char *grayframe, unsigned char *frame, int imW, int imH);
void OFfilter(float *OFx, float *OFy, float dx, float dy, int count, int OF_FilterType);
#endif /* OPTIC_FLOW_INT_H */
@@ -1,61 +0,0 @@
/*
* Copyright (C) 2012-2013
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include <stdint.h>
#include "image.h"
/** Simplified high-speed low CPU downsample function without averaging
*
* downsample factor must be 1, 2, 4, 8 ... 2^X
* image of type UYVY expected. Only one color UV per 2 pixels
*
* we keep the UV color of the first pixel pair
* and sample the intensity evenly 1-3-5-7-... or 1-5-9-...
*
* input: u1y1 v1y2 u3y3 v3y4 u5y5 v5y6 u7y7 v7y8 ...
* downsample=1 u1y1 v1y2 u3y3 v3y4 u5y5 v5y6 u7y7 v7y8 ...
* downsample=2 u1y1v1 (skip2) y3 (skip2) u5y5v5 (skip2 y7 (skip2) ...
* downsample=4 u1y1v1 (skip6) y5 (skip6) ...
*/
inline void resize_uyuv(struct img_struct *input, struct img_struct *output, int downsample);

/**
 * Downsample a UYVY image by an integer factor without averaging.
 * Copies the U,Y,V of the first pixel of each kept pair, skips
 * (downsample-1) pixels, copies the next Y, skips again, then skips
 * (downsample-1) whole input rows per output row.
 * downsample must be a power of two (1, 2, 4, 8, ...); output must be
 * input->w/downsample by input->h/downsample.
 */
inline void resize_uyuv(struct img_struct *input, struct img_struct *output, int downsample)
{
  uint8_t *source = input->buf;
  uint8_t *dest = output->buf;
  int pixelskip = (downsample - 1) * 2;   /* bytes to skip per dropped pixel */
  for (int y = 0; y < output->h; y++) {
    for (int x = 0; x < output->w; x += 2) {
      // Bytes are written U,Y,V,Y (UYVY order);
      // NOTE(review): the original "YUYV" label here looked stale.
      *dest++ = *source++; // U
      *dest++ = *source++; // Y
      *dest++ = *source++; // V
      source += pixelskip;
      *dest++ = *source++; // Y
      source += pixelskip;
    }
    // read 1 in every 'downsample' rows, so skip (downsample-1) rows after reading the first
    source += (downsample-1) * input->w * 2;
  }
}
@@ -1,159 +0,0 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Trigonometry
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "trig.h"
/* Lookup table: cos(d degrees) * 10000 for d = 0..90 (91 entries),
 * shared by the integer trigonometry helpers below. */
static int cosine[] = {
  10000, 9998, 9994, 9986, 9976, 9962, 9945, 9925, 9903, 9877,
  9848, 9816, 9781, 9744, 9703, 9659, 9613, 9563, 9511, 9455,
  9397, 9336, 9272, 9205, 9135, 9063, 8988, 8910, 8829, 8746,
  8660, 8572, 8480, 8387, 8290, 8192, 8090, 7986, 7880, 7771,
  7660, 7547, 7431, 7314, 7193, 7071, 6947, 6820, 6691, 6561,
  6428, 6293, 6157, 6018, 5878, 5736, 5592, 5446, 5299, 5150,
  5000, 4848, 4695, 4540, 4384, 4226, 4067, 3907, 3746, 3584,
  3420, 3256, 3090, 2924, 2756, 2588, 2419, 2250, 2079, 1908,
  1736, 1564, 1392, 1219, 1045, 872, 698, 523, 349, 175,
  0
};
/** Integer sine: sin(ix degrees) scaled by 1000 (range -1000..1000). */
int sin_zelf(int ix)
{
  /* Normalize the angle into [0, 360). */
  ix %= 360;
  if (ix < 0) {
    ix += 360;
  }
  int q = ix / 90;   /* quadrant index 0..3 */
  int r = ix % 90;   /* offset within the quadrant */
  switch (q) {
    case 0:  return cosine[90 - r] / 10;
    case 1:  return cosine[r] / 10;
    case 2:  return -cosine[90 - r] / 10;
    default: return -cosine[r] / 10;
  }
}
/** Integer cosine: cos(ix degrees) scaled by 1000 (range -1000..1000). */
int cos_zelf(int ix)
{
  /* Normalize the angle into [0, 360). */
  ix %= 360;
  if (ix < 0) {
    ix += 360;
  }
  int q = ix / 90;   /* quadrant index 0..3 */
  int r = ix % 90;   /* offset within the quadrant */
  switch (q) {
    case 0:  return cosine[r] / 10;
    case 1:  return -cosine[90 - r] / 10;
    case 2:  return -cosine[r] / 10;
    default: return cosine[90 - r] / 10;
  }
}
/** Integer tangent: tan(ix degrees) scaled by 1000; clamped near asymptotes. */
int tan_zelf(int ix)
{
  /* Normalize the angle into [0, 360). */
  ix %= 360;
  if (ix < 0) {
    ix += 360;
  }
  /* Vertical asymptotes are clamped to +/-9999. */
  if (ix == 90)  { return 9999; }
  if (ix == 270) { return -9999; }
  int q = ix / 90;   /* quadrant index 0..3 */
  int r = ix % 90;   /* offset within the quadrant */
  switch (q) {
    case 0:  return (1000 * cosine[90 - r]) / cosine[r];
    case 1:  return -(1000 * cosine[r]) / cosine[90 - r];
    case 2:  return (1000 * cosine[90 - r]) / cosine[r];
    default: return -(1000 * cosine[r]) / cosine[90 - r];
  }
}
/**
 * Integer arc-sine in whole degrees via the cosine lookup table.
 * @param y   opposite side (may be negative)
 * @param hyp hypotenuse (may be negative)
 * @return angle in degrees; negated when y and hyp have opposite signs;
 *         0 when y == 0 or y > hyp
 * NOTE(review): the y > hyp guard compares signed values before taking
 * absolute values, and sgn = hyp * y can overflow for large inputs —
 * confirm expected input ranges with the callers.
 */
int asin_zelf(int y, int hyp)
{
  int quot, sgn, ix;
  if ((y > hyp) || (y == 0)) {
    return 0;
  }
  sgn = hyp * y;   /* sign of the result */
  if (hyp < 0) {
    hyp = -hyp;
  }
  if (y < 0) {
    y = -y;
  }
  quot = (y * 10000) / hyp;   /* |y|/|hyp| scaled to 0..10000 */
  if (quot > 9999) {
    quot = 9999;
  }
  /* Find the table slot whose cosine value brackets quot. */
  for (ix = 0; ix < 90; ix++)
    if ((quot < cosine[ix]) && (quot >= cosine[ix + 1])) {
      break;
    }
  if (sgn < 0) {
    return -(90 - ix);
  } else {
    return 90 - ix;
  }
}
/**
 * Fixed-point arccosine: acos(x/hyp) in whole degrees.
 * Performs a linear search over the degree-resolution cosine table, so the
 * result is only accurate to about one degree.
 * Fix: removed the unreachable `return 0;` that followed the x == 0
 * if/else in which both branches already return.
 * @param[in] x The adjacent side
 * @param[in] hyp The hypotenuse
 * @return acos(x/hyp) in degrees; negative when x and hyp differ in sign,
 *         0 when x > hyp
 */
int acos_zelf(int x, int hyp)
{
  int quot, sgn, ix;
  // NOTE(review): rejects x > hyp but not x < -hyp — callers are presumably
  // expected to satisfy |x| <= |hyp|; confirm at call sites
  if (x > hyp) {
    return 0;
  }
  if (x == 0) {
    // acos(0) is +/-90 degrees; the sign follows the hypotenuse
    if (hyp < 0) {
      return -90;
    } else {
      return 90;
    }
  }
  sgn = hyp * x;  // sign of the result (beware: product can overflow for large inputs)
  if (hyp < 0) {
    hyp = -hyp;
  }
  if (x < 0) {
    x = -x;
  }
  quot = (x * 10000) / hyp;  // |x/hyp| in the table's 1e4 fixed point
  if (quot > 9999) {
    quot = 9999;  // clamp so the table search below always brackets the value
  }
  // Find ix such that cosine[ix] > quot >= cosine[ix + 1]
  for (ix = 0; ix < 90; ix++)
    if ((quot < cosine[ix]) && (quot >= cosine[ix + 1])) {
      break;
    }
  if (sgn < 0) {
    return -ix;
  } else {
    return ix;
  }
}
/**
 * Fixed-point arctangent: atan(y/x) in whole degrees.
 * Uses a rational approximation of atan on the ratio of the smaller over
 * the larger magnitude, so the approximation input stays in [0, 1].
 * Note: the signs of both inputs are discarded — the result is always
 * the magnitude of the angle, in [0, 90].
 * @param[in] y The numerator of the ratio
 * @param[in] x The denominator of the ratio
 * @return atan(|y|/|x|) in degrees, in [0, 90]; 90 when both inputs are 0
 */
int atan_zelf(int y, int x)
{
  int ax = (x < 0) ? -x : x;
  int ay = (y < 0) ? -y : y;

  /* Keep the ratio <= 1 by swapping, then un-swap via 90 - angle */
  int swapped = 0;
  if (ax < ay) {
    int tmp = ax;
    ax = ay;
    ay = tmp;
    swapped = 1;
  }
  if (ax == 0) {
    return 90;
  }

  int ratio = (ay * 1000) / ax;  /* ratio in milli-units, 0..1000 */
  int angle = (360 * ratio) / (6283 + ((((1764 * ratio) / 1000) * ratio) / 1000));

  return swapped ? (90 - angle) : angle;
}
/**
 * Integer square root: floor(sqrt(val)).
 * Bit-by-bit digit extraction, one bit of the 16-bit root per iteration,
 * working entirely in unsigned arithmetic (no multiplies or divides).
 * @param[in] val The value to take the square root of
 * @return The largest r such that r * r <= val
 */
unsigned int isqrt(unsigned int val)
{
  unsigned int root = 0;
  unsigned int bit = 0x8000;   /* candidate root bit, from MSB down */
  unsigned int shift = 15;

  while (bit) {
    unsigned int trial = ((root << 1) + bit) << shift;
    shift--;
    if (val >= trial) {
      root += bit;
      val -= trial;
    }
    bit >>= 1;
  }
  return root;
}
@@ -1,7 +0,0 @@
int cos_zelf(int ix);
int sin_zelf(int);
int tan_zelf(int);
int acos_zelf(int, int);
int asin_zelf(int, int);
int atan_zelf(int, int);
unsigned int isqrt(unsigned int);
@@ -415,20 +415,29 @@ void MakeTables(int q)
}
}
uint8_t *jpeg_encode_image(uint8_t *input_ptr, uint8_t *output_ptr, uint32_t quality_factor, uint32_t image_format,
uint32_t image_width, uint32_t image_height, bool_t add_dri_header)
/**
* Encode an YUV422 image
* @param[in] *in The input image
* @param[out] *out The output JPEG image
* @param[in] quality_factor Quality factor of the encoding (0-99)
* @param[in] add_dri_header Add the DRI header (needed for full JPEG)
*/
void jpeg_encode_image(struct image_t *in, struct image_t *out, uint32_t quality_factor, bool_t add_dri_header)
{
uint16_t i, j;
uint8_t *output_ptr = out->buf;
uint8_t *input_ptr = in->buf;
uint32_t image_format = FOUR_ZERO_ZERO;
if (in->type == IMAGE_YUV422) {
image_format = FOUR_TWO_TWO;
}
JPEG_ENCODER_STRUCTURE JpegStruct;
JPEG_ENCODER_STRUCTURE *jpeg_encoder_structure = &JpegStruct;
/* Initialization of JPEG control structure */
jpeg_initialization(jpeg_encoder_structure, image_format, image_width, image_height);
jpeg_initialization(jpeg_encoder_structure, image_format, in->w, in->h);
/* Quantization Table Initialization */
//jpeg_initialize_quantization_tables (quality_factor);
@@ -437,7 +446,7 @@ uint8_t *jpeg_encode_image(uint8_t *input_ptr, uint8_t *output_ptr, uint32_t qua
/* Writing Marker Data */
if (add_dri_header) {
output_ptr = jpeg_write_markers(output_ptr, image_format, image_width, image_height);
output_ptr = jpeg_write_markers(output_ptr, image_format, in->w, in->h);
}
for (i = 1; i <= jpeg_encoder_structure->vertical_mcus; i++) {
@@ -469,7 +478,9 @@ uint8_t *jpeg_encode_image(uint8_t *input_ptr, uint8_t *output_ptr, uint32_t qua
/* Close Routine */
output_ptr = jpeg_close_bitstream(output_ptr);
return output_ptr;
out->w = in->w;
out->h = in->h;
out->buf_size = output_ptr - (uint8_t *)out->buf;
}
static uint8_t *jpeg_encodeMCU(JPEG_ENCODER_STRUCTURE *jpeg_encoder_structure, uint32_t image_format, uint8_t *output_ptr)
@@ -22,6 +22,7 @@
#define _CV_ENCODING_JPEG_H
#include "std.h"
#include "lib/vision/image.h"
/* The different type of image encodings */
#define FOUR_ZERO_ZERO 0
@@ -31,15 +32,7 @@
#define RGB 4
/* JPEG encode an image */
unsigned char *jpeg_encode_image(
uint8_t *in,
uint8_t *out,
uint32_t q, // image quality 1-8
uint32_t fmt, // image format code
uint32_t width, // image width
uint32_t height, // image height
bool_t add_dri_header // data only or full jpeg file
);
void jpeg_encode_image(struct image_t *in, struct image_t *out, uint32_t quality_factor, bool_t add_dri_header);
/* Create an SVS header */
int jpeg_create_svs_header(unsigned char *buf, int32_t size, int w);
@@ -62,6 +62,7 @@ uint8_t JpegScanDataCh2B[KJpegCh2ScanDataLen] = {
/**
* Send a test RTP frame
* @param[in] *udp The udp connection to send the test frame over
*/
void rtp_frame_test(struct udp_periph *udp)
{
@@ -86,13 +87,21 @@ void rtp_frame_test(struct udp_periph *udp)
/**
* Send an RTP frame
* @param[in] *udp The UDP connection to send the frame over
* @param[in] *img The image to send over the RTP connection
* @param[in] format_code 0 for YUV422 and 1 for YUV421
* @param[in] quality_code The JPEG encoding quality
* @param[in] has_dri_header Whether we have an DRI header or not
* @param[in] delta_t Time between images (if set to 0 or less it is calculated)
*/
void rtp_frame_send(struct udp_periph *udp, uint8_t *Jpeg, uint32_t JpegLen, int w, int h, uint8_t format_code,
void rtp_frame_send(struct udp_periph *udp, struct image_t *img, uint8_t format_code,
uint8_t quality_code, uint8_t has_dri_header, uint32_t delta_t)
{
static uint32_t packetcounter = 0;
static uint32_t timecounter = 0;
uint32_t offset = 0;
uint32_t jpeg_size = img->buf_size;
uint8_t *jpeg_ptr = img->buf;
#define MAX_PACKET_SIZE 1400
@@ -104,20 +113,20 @@ void rtp_frame_send(struct udp_periph *udp, uint8_t *Jpeg, uint32_t JpegLen, int
}
// Split frame into packets
for (; JpegLen > 0;) {
for (; jpeg_size > 0;) {
uint32_t len = MAX_PACKET_SIZE;
uint8_t lastpacket = 0;
if (JpegLen <= len) {
if (jpeg_size <= len) {
lastpacket = 1;
len = JpegLen;
len = jpeg_size;
}
rtp_packet_send(udp, Jpeg, len, packetcounter, timecounter, offset, lastpacket, w, h, format_code, quality_code,
has_dri_header);
rtp_packet_send(udp, jpeg_ptr, len, packetcounter, timecounter, offset, lastpacket, img->w, img->h, format_code,
quality_code, has_dri_header);
JpegLen -= len;
Jpeg += len;
jpeg_size -= len;
jpeg_ptr += len;
offset += len;
packetcounter++;
}
@@ -133,7 +142,18 @@ void rtp_frame_send(struct udp_periph *udp, uint8_t *Jpeg, uint32_t JpegLen, int
* The RTP timestamp is in units of 90000Hz. The same timestamp MUST
appear in each fragment of a given frame. The RTP marker bit MUST be
set in the last packet of a frame.
*
* @param[in] *udp The UDP socket to send the RTP packet over
* @param[in] *Jpeg JPEG encoded image byte buffer
* @param[in] JpegLen The length of the byte buffer
* @param[in] m_SequenceNumber RTP sequence number
* @param[in] m_Timestamp Timestamp of the image
* @param[in] m_offset 3 byte fragmentation offset for fragmented images
* @param[in] marker_bit RTP marker bit
* @param[in] w The width of the JPEG image
* @param[in] h The height of the image
* @param[in] format_code 0 for YUV422 and 1 for YUV421
* @param[in] quality_code The JPEG encoding quality
* @param[in] has_dri_header Whether we have an DRI header or not
*/
static void rtp_packet_send(
struct udp_periph *udp,
@@ -29,18 +29,11 @@
#define _CV_ENCODING_RTP_H
#include "std.h"
#include "lib/vision/image.h"
#include "mcu_periph/udp.h"
void rtp_frame_send(
struct udp_periph *udp, // socket
uint8_t *Jpeg, uint32_t JpegLen, // jpeg data
int w, int h, // width and height
uint8_t format_code, // 0=422, 1=421
uint8_t quality_code, // 0-99 of 128 for custom (include
uint8_t has_dri_header, // Does Jpeg data include Header Info?
uint32_t delta_t // time step 90kHz
);
void rtp_frame_send(struct udp_periph *udp, struct image_t *img, uint8_t format_code, uint8_t quality_code,
uint8_t has_dri_header, uint32_t delta_t);
void rtp_frame_test(struct udp_periph *udp);
#endif /* _CV_ENCODING_RTP_H */
@@ -1,104 +0,0 @@
#include "socket.h"
#include <netdb.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <netdb.h>
#include <errno.h>
#include <netinet/in.h>
# define ADDR_SIZE_TYPE socklen_t
# define SOCKET_ERROR -1
# define IO_SOCKET ioctl
/**
 * Create a pair of UDP sockets: one for sending to str_ip_out:port_out and
 * one bound to port_in on all interfaces for receiving.
 * The caller owns the returned struct and must free it after closing both sockets.
 * Fix: the original never checked malloc() or socket() and would dereference
 * NULL / store -1 descriptors on failure.
 * @param[in] str_ip_out Destination IP address in dotted-quad notation
 * @param[in] port_out Destination UDP port
 * @param[in] port_in Local UDP port to bind the receive socket to
 * @param[in] broadcast Non-zero to enable SO_BROADCAST on the send socket
 * @return The new socket wrapper, or NULL when allocation or socket creation fails
 */
struct UdpSocket *udp_socket(const char *str_ip_out, const int port_out, const int port_in, const int broadcast)
{
  struct UdpSocket *me = malloc(sizeof(struct UdpSocket));
  if (me == NULL) {
    return NULL;  // out of memory
  }

  int so_reuseaddr = 1;

  // Outgoing socket
  me->socket_out = socket(AF_INET, SOCK_DGRAM, 0);
  if (me->socket_out < 0) {
    free(me);
    return NULL;
  }
  setsockopt(me->socket_out, SOL_SOCKET, SO_REUSEADDR,
             &so_reuseaddr, sizeof(so_reuseaddr));
  /* only set broadcast option if explicitly enabled */
  if (broadcast)
    setsockopt(me->socket_out, SOL_SOCKET, SO_BROADCAST,
               &broadcast, sizeof(broadcast));
  me->addr_out.sin_family = AF_INET;
  me->addr_out.sin_port = htons(port_out);
  me->addr_out.sin_addr.s_addr = inet_addr(str_ip_out);

  // Incoming socket, bound to port_in on all interfaces
  me->socket_in = socket(AF_INET, SOCK_DGRAM, 0);
  if (me->socket_in < 0) {
    close(me->socket_out);
    free(me);
    return NULL;
  }
  setsockopt(me->socket_in, SOL_SOCKET, SO_REUSEADDR,
             &so_reuseaddr, sizeof(so_reuseaddr));
  me->addr_in.sin_family = AF_INET;
  me->addr_in.sin_port = htons(port_in);
  me->addr_in.sin_addr.s_addr = htonl(INADDR_ANY);
  if (bind(me->socket_in, (struct sockaddr *)&me->addr_in, sizeof(me->addr_in)) < 0) {
    // Keep the original best-effort behavior but at least report the failure
    printf("udp_socket: could not bind to port %d\n", port_in);
  }

  return me;
}
#include <stdio.h>
//#define UDP_MODE MSG_DONTWAIT
#define UDP_MODE 0
/**
 * Send a buffer over the outgoing UDP socket (best effort).
 * The sendto() result is intentionally not propagated: the caller always
 * gets len back (fire-and-forget, matching the original behavior).
 * @param[in] me The UDP socket wrapper to send on
 * @param[in] buf The bytes to send
 * @param[in] len Number of bytes in buf
 * @return len, regardless of whether the datagram was actually sent
 */
int udp_write(struct UdpSocket *me, unsigned char *buf, int len)
{
  ssize_t sent = sendto(me->socket_out, buf, len, UDP_MODE,
                        (struct sockaddr *)&me->addr_out, sizeof(me->addr_out));
  (void)sent;  // best-effort: result deliberately ignored
  return len;
}
unsigned long MIN(unsigned long a, unsigned long b);
/** Return the smaller of two unsigned longs. */
unsigned long MIN(unsigned long a, unsigned long b)
{
  return (a < b) ? a : b;
}
/**
 * Drain all pending datagrams from the receive socket into buf.
 * Repeatedly asks the kernel how many bytes are pending (FIONREAD) and
 * reads them until nothing is left or buf is full.
 * Fixes over the original:
 *  - recvfrom() was passed `(socklen_t *) sizeof(...)` — a size VALUE cast
 *    to a pointer, which recvfrom writes through (undefined behavior).
 *    A real socklen_t variable is used now.
 *  - buf was never advanced between reads, so each datagram overwrote the
 *    previous one while newbytes kept growing; reads also were not bounded
 *    by the space remaining in buf.
 *  - the recvfrom() return value is now checked before accumulating.
 * @param[in] me The UDP socket wrapper to read from
 * @param[out] buf Destination buffer
 * @param[in] len Size of buf in bytes
 * @return The total number of bytes read into buf
 */
int udp_read(struct UdpSocket *me, unsigned char *buf, int len)
{
  unsigned long toread = 0;
  int btr = 1; // set to >0 in order to start the reading loop
  int newbytes = 0;
  int status;
  // if socket is connected
  for (; btr > 0;) {
    // Check how many bytes are pending on the socket
    status = IO_SOCKET(me->socket_in, FIONREAD, &toread);
    if (status == SOCKET_ERROR) {
      printf("problem receiving from socket\n");
      break;
    }
    if (toread <= 0) {
      break;
    }
    // Stop when the destination buffer is full
    if (newbytes >= len) {
      break;
    }
    // Read the pending data, bounded by the space left in buf
    btr = MIN(toread, (unsigned long)(len - newbytes));
    socklen_t addr_len = sizeof(me->addr_in);
    ssize_t got = recvfrom(me->socket_in, buf + newbytes, btr, 0,
                           (struct sockaddr *)&me->addr_in, &addr_len);
    if (got < 0) {
      break;
    }
    newbytes += got;
  }
  return newbytes;
}
@@ -1,25 +0,0 @@
#ifndef SOCKET_H
#define SOCKET_H
#include <sys/socket.h>
#include <arpa/inet.h>
#define FMS_UNICAST 0
#define FMS_BROADCAST 1
/* A bidirectional UDP endpoint: one socket for receiving, one for sending */
struct UdpSocket {
  int socket_in;                //< File descriptor of the receiving socket
  int socket_out;               //< File descriptor of the sending socket
  struct sockaddr_in addr_in;   //< Local address the receive socket is bound to
  struct sockaddr_in addr_out;  //< Remote address datagrams are sent to
};
extern struct UdpSocket *udp_socket(const char *str_ip_out, const int port_out, const int port_in, const int broadcast);
extern int udp_write(struct UdpSocket *me, unsigned char *buf, int len);
extern int udp_read(struct UdpSocket *me, unsigned char *buf, int len);
#endif /* SOCKET_H */
@@ -46,6 +46,9 @@ static void *v4l2_capture_thread(void *data);
* The main capturing thread
* This thread handles the queue and dequeue of buffers, to make sure only the latest
* image buffer is preserved for image processing.
* @param[in] *data The Video 4 Linux 2 device pointer
* @return 0 on success, -1 if it isn't able to fetch an image,
* -2 on timeout of taking an image, -3 on failing buffer dequeue
*/
static void *v4l2_capture_thread(void *data)
{
@@ -88,6 +91,9 @@ static void *v4l2_capture_thread(void *data)
}
assert(buf.index < dev->buffers_cnt);
// Copy the timestamp
memcpy(&dev->buffers[buf.index].timestamp, &buf.timestamp, sizeof(struct timeval));
// Update the dequeued id
// We need lock because between setting prev_idx and updating the deq_idx the deq_idx could change
pthread_mutex_lock(&dev->mutex);
@@ -113,12 +119,13 @@ static void *v4l2_capture_thread(void *data)
/**
* Initialize a V4L2 subdevice.
* The subdevice name should be something like '/dev/v4l-subdev0'
* The pad and which indicate the way the subdevice should communicate
* with the real device. Which pad it should take.
* Code should be something like V4L2_MBUS_FMT_UYVY8_2X8. See the V4l2
* manual for available codes.
* Width and height are the amount of pixels this subdevice must cover.
* @param[in] *subdev_name The subdevice name (like /dev/v4l-subdev0)
* @param[in] pad,which The way the subdevice should communicate and be
* connected to the real device.
* @param[in] code The encoding the subdevice uses (like V4L2_MBUS_FMT_UYVY8_2X8,
* see the V4L2 manual for available encodings)
* @param[in] width,height The width and height of the images
* @return Whether the subdevice was successfully initialized
*/
bool_t v4l2_init_subdev(char *subdev_name, uint8_t pad, uint8_t which, uint16_t code, uint16_t width, uint16_t height)
{
@@ -160,11 +167,12 @@ bool_t v4l2_init_subdev(char *subdev_name, uint8_t pad, uint8_t which, uint16_t
}
/**
* Initialize a V4L2(Video for Linux 2) device
* The device name should be something like "/dev/video1"
* The subdevice name can be empty if there is no subdevice
* The buffer_cnt are the amount of buffers used in memory mapping
* Note that you need to close this device at the end of you program!
* Initialize a V4L2 (Video for Linux 2) device.
* Note that the device must be closed with v4l2_close(dev) at the end.
* @param[in] device_name The video device name (like /dev/video1)
* @param[in] width,height The width and height of the images
* @param[in] buffer_cnt The amount of buffers used for mapping
* @return The newly create V4L2 device
*/
struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height, uint8_t buffers_cnt) {
uint8_t i;
@@ -251,7 +259,6 @@ struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height
}
// Map the buffer
buffers[i].idx = i;
buffers[i].length = buf.length;
buffers[i].buf = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, buf.m.offset);
if (MAP_FAILED == buffers[i].buf) {
@@ -278,9 +285,12 @@ struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height
* Get the latest image buffer and lock it (Thread safe, BLOCKING)
* This functions blocks until image access is granted. This should not take that long, because
* it is only locked while enqueueing an image.
* Make sure you free the image after processing!
* Make sure you free the image after processing with v4l2_image_free()!
* @param[in] *dev The V4L2 video device we want to get an image from
* @param[out] *img The image that we got from the video device
*/
struct v4l2_img_buf *v4l2_image_get(struct v4l2_device *dev) {
void v4l2_image_get(struct v4l2_device *dev, struct image_t *img)
{
uint16_t img_idx = V4L2_IMG_NONE;
// Continue to wait for an image
@@ -299,16 +309,26 @@ struct v4l2_img_buf *v4l2_image_get(struct v4l2_device *dev) {
}
}
// Return the image
return &dev->buffers[img_idx];
// Set the image
img->type = IMAGE_YUV422;
img->w = dev->w;
img->h = dev->h;
img->buf_idx = img_idx;
img->buf_size = dev->buffers[img_idx].length;
img->buf = dev->buffers[img_idx].buf;
memcpy(&img->ts, &dev->buffers[img_idx].timestamp, sizeof(struct timeval));
}
/**
* Get the latest image and lock it (Thread safe, NON BLOCKING)
* This function returns NULL if it can't get access to the current image.
* Make sure you free the image after processing!
* Make sure you free the image after processing with v4l2_image_free())!
* @param[in] *dev The V4L2 video device we want to get an image from
* @param[out] *img The image that we got from the video device
* @return Whether we got an image or not
*/
struct v4l2_img_buf *v4l2_image_get_nonblock(struct v4l2_device *dev) {
bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img)
{
uint16_t img_idx = V4L2_IMG_NONE;
// Try to get the current image
@@ -321,17 +341,27 @@ struct v4l2_img_buf *v4l2_image_get_nonblock(struct v4l2_device *dev) {
// Check if we really got an image
if (img_idx == V4L2_IMG_NONE) {
return NULL;
return FALSE;
} else {
return &dev->buffers[img_idx];
// Set the image
img->type = IMAGE_YUV422;
img->w = dev->w;
img->h = dev->h;
img->buf_idx = img_idx;
img->buf_size = dev->buffers[img_idx].length;
img->buf = dev->buffers[img_idx].buf;
memcpy(&img->ts, &dev->buffers[img_idx].timestamp, sizeof(struct timeval));
return TRUE;
}
}
/**
* Free the image and enqueue the buffer (Thread safe)
* This must be done after processing the image, because else all buffers are locked
* @param[in] *dev The video for linux device which the image is from
* @param[in] *img The image to free
*/
void v4l2_image_free(struct v4l2_device *dev, struct v4l2_img_buf *img_buf)
void v4l2_image_free(struct v4l2_device *dev, struct image_t *img)
{
struct v4l2_buffer buf;
@@ -339,16 +369,18 @@ void v4l2_image_free(struct v4l2_device *dev, struct v4l2_img_buf *img_buf)
CLEAR(buf);
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = img_buf->idx;
buf.index = img->buf_idx;
if (ioctl(dev->fd, VIDIOC_QBUF, &buf) < 0) {
printf("[v4l2] Could not enqueue %d for %s\n", img_buf->idx, dev->name);
printf("[v4l2] Could not enqueue %d for %s\n", img->buf_idx, dev->name);
}
}
/**
* Start capturing images in streaming mode (Thread safe)
* Returns TRUE when it successfully started capturing. Note that it also returns
* FALSE when it already is in capturing mode.
* @param[in] *dev The video for linux device to start capturing from
* @return It returns TRUE if it successfully started capture,
* but keep in mind that if it is already started it will
* return FALSE.
*/
bool_t v4l2_start_capture(struct v4l2_device *dev)
{
@@ -404,9 +436,10 @@ bool_t v4l2_start_capture(struct v4l2_device *dev)
/**
* Stop capturing of the image stream (Thread safe)
* Returns TRUE if it successfully stopped capturing. Note that it also returns FALSE
* when the capturing is already stopped. This function is blocking until capturing
* thread is closed.
* This function is blocking until capturing thread is closed.
* @param[in] *dev The video for linux device to stop capturing
* @return TRUE if it successfully stopped capturing. Note that it also returns FALSE
* when the capturing is already stopped.
*/
bool_t v4l2_stop_capture(struct v4l2_device *dev)
{
@@ -441,6 +474,7 @@ bool_t v4l2_stop_capture(struct v4l2_device *dev)
* Close the V4L2 device (Thread safe)
* This needs to be preformed to clean up all the buffers and close the device.
* Note that this also stops the capturing if it is still capturing.
* @param[in] *dev The video for linux device to close(cleanup)
*/
void v4l2_close(struct v4l2_device *dev)
{
@@ -28,16 +28,20 @@
#ifndef _CV_LIB_V4L2_H
#define _CV_LIB_V4L2_H
#include "std.h"
#include <linux/v4l2-subdev.h>
#include <pthread.h>
#include <sys/time.h>
#include "std.h"
#include "lib/vision/image.h"
#define V4L2_IMG_NONE 255 //< There is currently no image available
/* V4L2 memory mapped image buffer */
struct v4l2_img_buf {
uint8_t idx; //< The index of the buffer
size_t length; //< The size of the buffer
void *buf; //< Pointer to the memory mapped buffer
size_t length; //< The size of the buffer
struct timeval timestamp; //< The time value of the image
void *buf; //< Pointer to the memory mapped buffer
};
/* V4L2 device */
@@ -56,9 +60,9 @@ struct v4l2_device {
/* External functions */
bool_t v4l2_init_subdev(char *subdev_name, uint8_t pad, uint8_t which, uint16_t code, uint16_t width, uint16_t height);
struct v4l2_device *v4l2_init(char *device_name, uint16_t width, uint16_t height, uint8_t buffers_cnt);
struct v4l2_img_buf *v4l2_image_get(struct v4l2_device *dev);
struct v4l2_img_buf *v4l2_image_get_nonblock(struct v4l2_device *dev);
void v4l2_image_free(struct v4l2_device *dev, struct v4l2_img_buf *img_buf);
void v4l2_image_get(struct v4l2_device *dev, struct image_t *img);
bool_t v4l2_image_get_nonblock(struct v4l2_device *dev, struct image_t *img);
void v4l2_image_free(struct v4l2_device *dev, struct image_t *img);
bool_t v4l2_start_capture(struct v4l2_device *dev);
bool_t v4l2_stop_capture(struct v4l2_device *dev);
void v4l2_close(struct v4l2_device *dev);
File diff suppressed because it is too large Load Diff
@@ -34,18 +34,9 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef FAST_H
#define FAST_H
typedef struct { int x, y; } xyFAST;
typedef unsigned char byte;
int fast9_corner_score(const byte *p, const int pixel[], int bstart);
xyFAST *fast9_detect(const byte *im, int xsize, int ysize, int stride, int b, int *ret_num_corners);
int *fast9_score(const byte *i, int stride, xyFAST *corners, int num_corners, int b);
xyFAST *fast9_detect_nonmax(const byte *im, int xsize, int ysize, int stride, int b, int *ret_num_corners);
xyFAST *nonmax_suppression(const xyFAST *corners, const int *scores, int num_corners, int *ret_num_nonmax);
#include "std.h"
#include "lib/vision/image.h"
struct point_t *fast9_detect(struct image_t *img, uint8_t threshold, uint16_t min_dist, uint16_t x_padding, uint16_t y_padding, uint16_t *num_corners);
#endif
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,83 @@
/*
* Copyright (C) 2015 Freek van Tienen <freek.v.tienen@gmail.com>
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/**
* @file modules/computer_vision/lib/vision/image.h
* Image helper functions like resizing, color filter, converters...
*/
#ifndef _CV_LIB_VISION_IMAGE_H
#define _CV_LIB_VISION_IMAGE_H
#include "std.h"
#include <sys/time.h>
/* The different types of images we currently support */
enum image_type {
  IMAGE_YUV422,    ///< UYVY format (uint16 per pixel)
  IMAGE_GRAYSCALE, ///< Grayscale image with only the Y part (uint8 per pixel)
  IMAGE_JPEG,      ///< A JPEG encoded image (not per pixel encoded)
  IMAGE_GRADIENT   ///< An image gradient (int16 per pixel)
};
/* Main image structure: pixel data plus the metadata needed to interpret it */
struct image_t {
  enum image_type type;   ///< The image type (determines the layout of buf)
  uint16_t w;             ///< Image width
  uint16_t h;             ///< Image height
  struct timeval ts;      ///< The timestamp of creation
  uint8_t buf_idx;        ///< Buffer index, used when freeing a V4L2-backed image
  uint32_t buf_size;      ///< The buffer size in bytes
  void *buf;              ///< Image buffer (encoding depends on the image_type)
};
/* Image point structure (used for both pixel and subpixel coordinates,
 * depending on the caller) */
struct point_t {
  uint16_t x;   ///< The x coordinate of the point
  uint16_t y;   ///< The y coordinate of the point
};
/* Vector structure for point differences (optical-flow results) */
struct flow_t {
  struct point_t pos;   ///< The original position the flow comes from (in subpixels)
  int16_t flow_x;       ///< The x direction flow in subpixels
  int16_t flow_y;       ///< The y direction flow in subpixels
};
/* Usefull image functions */
void image_create(struct image_t *img, uint16_t width, uint16_t height, enum image_type type);
void image_free(struct image_t *img);
void image_copy(struct image_t *input, struct image_t *output);
void image_switch(struct image_t *a, struct image_t *b);
void image_to_grayscale(struct image_t *input, struct image_t *output);
uint16_t image_yuv422_colorfilt(struct image_t *input, struct image_t *output, uint8_t y_m, uint8_t y_M, uint8_t u_m, uint8_t u_M, uint8_t v_m, uint8_t v_M);
void image_yuv422_downsample(struct image_t *input, struct image_t *output, uint16_t downsample);
void image_subpixel_window(struct image_t *input, struct image_t *output, struct point_t *center, uint16_t subpixel_factor);
void image_gradients(struct image_t *input, struct image_t *dx, struct image_t *dy);
void image_calculate_g(struct image_t *dx, struct image_t *dy, int32_t *g);
uint32_t image_difference(struct image_t *img_a, struct image_t *img_b, struct image_t *diff);
int32_t image_multiply(struct image_t *img_a, struct image_t *img_b, struct image_t *mult);
void image_show_points(struct image_t *img, struct point_t *points, uint16_t points_cnt);
void image_show_flow(struct image_t *img, struct flow_t *vectors, uint16_t points_cnt, uint8_t subpixel_factor);
void image_draw_line(struct image_t *img, struct point_t *from, struct point_t *to);
#endif
@@ -0,0 +1,181 @@
/*
* Copyright (C) 2014 G. de Croon
* 2015 Freek van Tienen <freek.v.tienen@gmail.com>
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/lib/vision/lucas_kanade.c
* @brief efficient fixed-point optical-flow calculation
*
* - Initial fixed-point C implementation by G. de Croon
* - Algorithm: Lucas-Kanade by Yves Bouguet
* - Publication: http://robots.stanford.edu/cs223b04/algo_tracking.pdf
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "lucas_kanade.h"
/**
 * Compute the optical flow of several points using the Lucas-Kanade algorithm by Yves Bouguet
 * The initial fixed-point implementation was done by G. de Croon and is adapted by
 * Freek van Tienen for the implementation in Paparazzi.
 * @param[in] *new_img The newest grayscale image (TODO: fix YUV422 support)
 * @param[in] *old_img The old grayscale image (TODO: fix YUV422 support)
 * @param[in] *points Points to start tracking from
 * @param[in,out] points_cnt The amount of points given; returns the amount of points actually tracked
 * @param[in] half_window_size Half the window size (in both x and y direction) to search inside
 * @param[in] subpixel_factor The subpixel factor which calculations should be based on
 * @param[in] max_iterations Maximum amount of iterations to find the new point
 * @param[in] step_threshold The threshold (in subpixels) at which the iterations should stop
 * @param[in] max_points The maximum amount of points to track; excess points are skipped evenly
 * @return The flow vectors from the original *points in subpixels (heap-allocated; caller must free())
 */
struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t *points_cnt,
                           uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint16_t max_points) {
  // A straightforward one-level implementation of Lucas-Kanade.
  // For all points:
  // (1) determine the subpixel neighborhood in the old image
  // (2) get the x- and y- gradients
  // (3) determine the 'G'-matrix [sum(Axx) sum(Axy); sum(Axy) sum(Ayy)], where sum is over the window
  // (4) iterate over taking steps in the image to minimize the error:
  //   [a] get the subpixel neighborhood in the new image
  //   [b] determine the image difference between the two neighborhoods
  //   [c] calculate the 'b'-vector
  //   [d] calculate the additional flow step and possibly terminate the iteration

  // Allocate some memory for returning the vectors
  // NOTE(review): malloc result is not checked — confirm callers tolerate a NULL return on OOM
  struct flow_t *vectors = malloc(sizeof(struct flow_t) * max_points);
  uint16_t new_p = 0;
  uint16_t points_orig = *points_cnt;
  *points_cnt = 0;

  // determine patch sizes and initialize neighborhoods
  uint16_t patch_size = 2 * half_window_size;
  uint32_t error_threshold = (25 * 25) * (patch_size * patch_size);
  uint16_t padded_patch_size = patch_size + 2;

  // Create the window images
  struct image_t window_I, window_J, window_DX, window_DY, window_diff;
  image_create(&window_I, padded_patch_size, padded_patch_size, IMAGE_GRAYSCALE);
  image_create(&window_J, patch_size, patch_size, IMAGE_GRAYSCALE);
  image_create(&window_DX, patch_size, patch_size, IMAGE_GRADIENT);
  image_create(&window_DY, patch_size, patch_size, IMAGE_GRADIENT);
  image_create(&window_diff, patch_size, patch_size, IMAGE_GRADIENT);

  // Calculate the amount of points to skip
  // NOTE(review): points_orig / max_points is integer division before the float
  // assignment, so the ratio is truncated — confirm this is intended
  float skip_points = (points_orig > max_points) ? points_orig / max_points : 1;

  // Go trough all points
  for (uint16_t i = 0; i < max_points && i < points_orig; i++) {
    uint16_t p = i * skip_points;

    // If the pixel is outside ROI, do not track it
    if (points[p].x < half_window_size || (old_img->w - points[p].x) < half_window_size
        || points[p].y < half_window_size || (old_img->h - points[p].y) < half_window_size) {
      continue;
    }

    // Convert the point to a subpixel coordinate
    vectors[new_p].pos.x = points[p].x * subpixel_factor;
    vectors[new_p].pos.y = points[p].y * subpixel_factor;
    vectors[new_p].flow_x = 0;
    vectors[new_p].flow_y = 0;

    // (1) determine the subpixel neighborhood in the old image
    image_subpixel_window(old_img, &window_I, &vectors[new_p].pos, subpixel_factor);

    // (2) get the x- and y- gradients
    image_gradients(&window_I, &window_DX, &window_DY);

    // (3) determine the 'G'-matrix [sum(Axx) sum(Axy); sum(Axy) sum(Ayy)], where sum is over the window
    int32_t G[4];
    image_calculate_g(&window_DX, &window_DY, G);

    // calculate G's determinant in subpixel units:
    int32_t Det = (G[0] * G[3] - G[1] * G[2]) / subpixel_factor;

    // Check if the determinant is bigger than 1 (skip ill-conditioned windows)
    if (Det < 1) {
      continue;
    }

    // a * (Ax - Bx) + (1-a) * (Ax+1 - Bx+1)
    // a * Ax - a * Bx + (1-a) * Ax+1 - (1-a) * Bx+1
    // (a * Ax + (1-a) * Ax+1) - (a * Bx + (1-a) * Bx+1)

    // (4) iterate over taking steps in the image to minimize the error:
    bool_t tracked = TRUE;
    for (uint8_t it = 0; it < max_iterations; it++) {
      struct point_t new_point = {
        vectors[new_p].pos.x + vectors[new_p].flow_x,
        vectors[new_p].pos.y + vectors[new_p].flow_y
      };
      // If the pixel is outside ROI, do not track it
      if (new_point.x / subpixel_factor < half_window_size || (old_img->w - new_point.x / subpixel_factor) < half_window_size
          || new_point.y / subpixel_factor < half_window_size || (old_img->h - new_point.y / subpixel_factor) < half_window_size) {
        tracked = FALSE;
        break;
      }

      // [a] get the subpixel neighborhood in the new image
      image_subpixel_window(new_img, &window_J, &new_point, subpixel_factor);

      // [b] determine the image difference between the two neighborhoods
      // Give up late in the iteration when the residual error is still too large
      uint32_t error = image_difference(&window_I, &window_J, &window_diff);
      if (error > error_threshold && it > max_iterations / 2) {
        tracked = FALSE;
        break;
      }

      // [c] calculate the 'b'-vector from the difference and the gradients
      int32_t b_x = image_multiply(&window_diff, &window_DX, NULL) / 255;
      int32_t b_y = image_multiply(&window_diff, &window_DY, NULL) / 255;

      // [d] calculate the additional flow step and possibly terminate the iteration
      int16_t step_x = (G[3] * b_x - G[1] * b_y) / Det;
      int16_t step_y = (G[0] * b_y - G[2] * b_x) / Det;
      vectors[new_p].flow_x += step_x;
      vectors[new_p].flow_y += step_y;

      // Check if we exceeded the treshold
      if ((abs(step_x) + abs(step_y)) < step_threshold) {
        break;
      }
    }

    // If we tracked the point we update the index and the count
    if (tracked) {
      new_p++;
      (*points_cnt)++;
    }
  }

  // Free the images
  image_free(&window_I);
  image_free(&window_J);
  image_free(&window_DX);
  image_free(&window_DY);
  image_free(&window_diff);

  // Return the vectors
  return vectors;
}
@@ -0,0 +1,40 @@
/*
* Copyright (C) 2014 G. de Croon
* 2015 Freek van Tienen <freek.v.tienen@gmail.com>
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/lib/vision/lucas_kanade.h
* @brief efficient fixed-point optical-flow calculation
*
* - Initial fixed-point C implementation by G. de Croon
* - Algorithm: Lucas-Kanade by Yves Bouguet
* - Publication: http://robots.stanford.edu/cs223b04/algo_tracking.pdf
*/
#ifndef OPTIC_FLOW_INT_H
#define OPTIC_FLOW_INT_H
#include "std.h"
#include "image.h"
struct flow_t *opticFlowLK(struct image_t *new_img, struct image_t *old_img, struct point_t *points, uint16_t *points_cnt,
uint16_t half_window_size, uint16_t subpixel_factor, uint8_t max_iterations, uint8_t step_threshold, uint16_t max_points);
#endif /* OPTIC_FLOW_INT_H */
@@ -1,213 +0,0 @@
/*
* Copyright (C) 2014 Hann Woei Ho
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/hover_stabilization.c
* @brief optical-flow based hovering for Parrot AR.Drone 2.0
*
* Control loops for optic flow based hovering.
* Computes setpoint for the lower level attitude stabilization to control horizontal velocity.
*/
// Own Header
#include "hover_stabilization.h"
// Stabilization
#include "firmwares/rotorcraft/stabilization/stabilization_attitude.h"
#include "firmwares/rotorcraft/guidance/guidance_v.h"
#include "autopilot.h"
// Downlink
#include "subsystems/datalink/downlink.h"
// Controller Gains
/* error if some gains are negative */
#if (VISION_PHI_PGAIN < 0) || \
(VISION_PHI_IGAIN < 0) || \
(VISION_THETA_PGAIN < 0) || \
(VISION_THETA_IGAIN < 0)
#error "ALL control gains have to be positive!!!"
#endif
bool activate_opticflow_hover;
float vision_desired_vx;
float vision_desired_vy;
int32_t vision_phi_pgain;
int32_t vision_phi_igain;
int32_t vision_theta_pgain;
int32_t vision_theta_igain;
// Controller Commands
struct Int32Eulers cmd_euler;
// Hover Stabilization
float Velx_Int;
float Vely_Int;
float Error_Velx;
float Error_Vely;
#define CMD_OF_SAT 1500 // 40 deg = 2859.1851
unsigned char saturateX = 0, saturateY = 0;
unsigned int set_heading;
#ifndef VISION_HOVER
#define VISION_HOVER TRUE
#endif
#ifndef VISION_PHI_PGAIN
#define VISION_PHI_PGAIN 500.
#endif
#ifndef VISION_PHI_IGAIN
#define VISION_PHI_IGAIN 10.
#endif
#ifndef VISION_THETA_PGAIN
#define VISION_THETA_PGAIN 500.
#endif
#ifndef VISION_THETA_IGAIN
#define VISION_THETA_IGAIN 10.
#endif
#ifndef VISION_DESIRED_VX
#define VISION_DESIRED_VX 0.
#endif
#ifndef VISION_DESIRED_VY
#define VISION_DESIRED_VY 0.
#endif
void run_opticflow_hover(void);
/**
 * Horizontal-guidance module entry hook.
 * Called when guidance_h switches to GUIDANCE_H_MODE_MODULE: resets the
 * hover velocity integrators and engages a fixed vertical setpoint.
 */
void guidance_h_module_enter(void)
{
  // Reset the integrated velocity errors of the hover controller
  Velx_Int = 0;
  Vely_Int = 0;
  // Request a vertical setpoint of -1 (NED, so above the origin)
  // NOTE(review): unit/scaling of guidance_v_z_sp not visible here — confirm
  guidance_v_z_sp = -1;
}
/**
 * RC read hook for module mode.
 * Intentionally empty: the horizontal setpoint is produced by the vision
 * controller, not by the RC transmitter.
 */
void guidance_h_module_read_rc(void)
{
  // Do not read RC
  // Setpoint being set by vision
}
/**
 * Periodic horizontal-guidance loop in module mode.
 * The attitude setpoint is written elsewhere by the vision controller;
 * this hook only runs the standard attitude stabilization on it.
 * @param[in] in_flight Whether the vehicle is currently in flight
 */
void guidance_h_module_run(bool_t in_flight)
{
  // Run
  // Setpoint being set by vision
  stabilization_attitude_run(in_flight);
}
/**
 * Initialize the vision-based hover stabilization.
 * Loads the compile-time configured gains and desired velocities into the
 * runtime (settings-tunable) variables and resets all controller state.
 */
void init_hover_stabilization_onvision()
{
  INT_EULERS_ZERO(cmd_euler);
  // Copy configuration defaults into the runtime-adjustable variables
  activate_opticflow_hover = VISION_HOVER;
  vision_phi_pgain = VISION_PHI_PGAIN;
  vision_phi_igain = VISION_PHI_IGAIN;
  vision_theta_pgain = VISION_THETA_PGAIN;
  vision_theta_igain = VISION_THETA_IGAIN;
  vision_desired_vx = VISION_DESIRED_VX;
  vision_desired_vy = VISION_DESIRED_VY;
  // Latch the current heading on the first control run
  set_heading = 1;
  // Clear errors and integrators
  Error_Velx = 0;
  Error_Vely = 0;
  Velx_Int = 0;
  Vely_Int = 0;
}
void run_hover_stabilization_onvision(struct CVresults* vision )
{
struct FloatVect3 V_body;
if (activate_opticflow_hover == TRUE) {
// Compute body velocities from ENU
struct FloatVect3 *vel_ned = (struct FloatVect3*)stateGetSpeedNed_f();
struct FloatQuat *q_n2b = stateGetNedToBodyQuat_f();
float_quat_vmult(&V_body, q_n2b, vel_ned);
}
// *************************************************************************************
// Downlink Message
// *************************************************************************************
DOWNLINK_SEND_OF_HOVER(DefaultChannel, DefaultDevice,
&vision->FPS, &vision->dx_sum, &vision->dy_sum, &vision->OFx, &vision->OFy,
&vision->diff_roll, &vision->diff_pitch,
&vision->Velx, &vision->Vely,
&V_body.x, &V_body.y,
&vision->cam_h, &vision->count);
if (autopilot_mode != AP_MODE_MODULE) {
return;
}
if (vision->flow_count) {
Error_Velx = vision->Velx - vision_desired_vx;
Error_Vely = vision->Vely - vision_desired_vy;
} else {
Error_Velx = 0;
Error_Vely = 0;
}
if (saturateX == 0) {
if (activate_opticflow_hover == TRUE) {
Velx_Int += vision_theta_igain * Error_Velx;
} else {
Velx_Int += vision_theta_igain * V_body.x;
}
}
if (saturateY == 0) {
if (activate_opticflow_hover == TRUE) {
Vely_Int += vision_phi_igain * Error_Vely;
} else {
Vely_Int += vision_phi_igain * V_body.y;
}
}
if (set_heading) {
cmd_euler.psi = stateGetNedToBodyEulers_i()->psi;
set_heading = 0;
}
if (activate_opticflow_hover == TRUE) {
cmd_euler.phi = - (vision_phi_pgain * Error_Vely + Vely_Int);
cmd_euler.theta = (vision_theta_pgain * Error_Velx + Velx_Int);
} else {
cmd_euler.phi = - (vision_phi_pgain * V_body.y + Vely_Int);
cmd_euler.theta = (vision_theta_pgain * V_body.x + Velx_Int);
}
saturateX = 0; saturateY = 0;
if (cmd_euler.phi < -CMD_OF_SAT) {cmd_euler.phi = -CMD_OF_SAT; saturateX = 1;}
else if (cmd_euler.phi > CMD_OF_SAT) {cmd_euler.phi = CMD_OF_SAT; saturateX = 1;}
if (cmd_euler.theta < -CMD_OF_SAT) {cmd_euler.theta = -CMD_OF_SAT; saturateY = 1;}
else if (cmd_euler.theta > CMD_OF_SAT) {cmd_euler.theta = CMD_OF_SAT; saturateY = 1;}
stabilization_attitude_set_rpy_setpoint_i(&cmd_euler);
DOWNLINK_SEND_VISION_STABILIZATION(DefaultChannel, DefaultDevice, &vision->Velx, &vision->Vely, &Velx_Int,
&Vely_Int, &cmd_euler.phi, &cmd_euler.theta);
}
@@ -1,60 +0,0 @@
/*
* Copyright (C) 2014 Hann Woei Ho
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/hover_stabilization.h
* @brief optical-flow based hovering for Parrot AR.Drone 2.0
*
* Control loops for optic flow based hovering.
* Computes setpoint for the lower level attitude stabilization to control horizontal velocity.
*/
#ifndef HOVER_STABILIZATION_H_
#define HOVER_STABILIZATION_H_
#include <std.h>
#include "inter_thread_data.h"
// Controller module
// Vertical loop re-uses Alt-hold
#define GUIDANCE_V_MODE_MODULE_SETTING GUIDANCE_V_MODE_HOVER
// Horizontal mode is a specific controller
#define GUIDANCE_H_MODE_MODULE_SETTING GUIDANCE_H_MODE_MODULE
// Implement own Horizontal loops
extern void guidance_h_module_enter(void);
extern void guidance_h_module_read_rc(void);
extern void guidance_h_module_run(bool_t in_flight);
void init_hover_stabilization_onvision(void);
void run_hover_stabilization_onvision(struct CVresults *vision);
extern bool activate_opticflow_hover;
extern float vision_desired_vx;
extern float vision_desired_vy;
extern int32_t vision_phi_pgain;
extern int32_t vision_phi_igain;
extern int32_t vision_theta_pgain;
extern int32_t vision_theta_igain;
#endif /* HOVER_STABILIZATION_H_ */
@@ -29,28 +29,26 @@
#ifndef _INTER_THREAD_DATA_H
#define _INTER_THREAD_DATA_H
/// Data from thread to module
struct CVresults {
int cnt; // Number of processed frames
/* The result calculated from the opticflow */
struct opticflow_result_t {
float fps; //< Frames per second of the optical flow calculation
uint16_t corner_cnt; //< The amount of coners found by FAST9
uint16_t tracked_cnt; //< The amount of tracked corners
float Velx; // Velocity as measured by camera
float Vely;
int flow_count;
int16_t flow_x; //< Flow in x direction from the camera (in subpixels)
int16_t flow_y; //< Flow in y direction from the camera (in subpixels)
int16_t flow_der_x; //< The derotated flow calculation in the x direction (in subpixels)
int16_t flow_der_y; //< The derotated flow calculation in the y direction (in subpixels)
float cam_h; // Debug parameters
int count;
float OFx, OFy, dx_sum, dy_sum;
float diff_roll;
float diff_pitch;
float FPS;
float vel_x; //< The velocity in the x direction
float vel_y; //< The velocity in the y direction
};
/// Data from module to thread
struct PPRZinfo {
int cnt; // IMU msg counter
float phi; // roll [rad]
float theta; // pitch [rad]
float agl; // height above ground [m]
/* The state of the drone when it took an image */
struct opticflow_state_t {
float phi; //< roll [rad]
float theta; //< pitch [rad]
float agl; //< height above ground [m]
};
#endif
@@ -0,0 +1,272 @@
/*
* Copyright (C) 2014 Hann Woei Ho
* 2015 Freek van Tienen <freek.v.tienen@gmail.com>
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/opticflow_calculator.c
* @brief Estimate velocity from optic flow.
*
* Using images from a vertical camera and IMU sensor data.
*/
#include "std.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
// Own Header
#include "opticflow_calculator.h"
// Computer Vision
#include "lib/vision/image.h"
#include "lib/vision/lucas_kanade.h"
#include "lib/vision/fast_rosten.h"
// Camera parameters (defaults are from an ARDrone 2)
#ifndef OPTICFLOW_FOV_W
#define OPTICFLOW_FOV_W 0.89360857702
#endif
PRINT_CONFIG_VAR(OPTICFLOW_FOV_W);
#ifndef OPTICFLOW_FOV_H
#define OPTICFLOW_FOV_H 0.67020643276
#endif
PRINT_CONFIG_VAR(OPTICFLOW_FOV_H);
#ifndef OPTICFLOW_FX
#define OPTICFLOW_FX 343.1211
#endif
PRINT_CONFIG_VAR(OPTICFLOW_FX);
#ifndef OPTICFLOW_FY
#define OPTICFLOW_FY 348.5053
#endif
PRINT_CONFIG_VAR(OPTICFLOW_FY);
/* Set the default values */
#ifndef OPTICFLOW_MAX_TRACK_CORNERS
#define OPTICFLOW_MAX_TRACK_CORNERS 25
#endif
PRINT_CONFIG_VAR(OPTICFLOW_MAX_TRACK_CORNERS);
#ifndef OPTICFLOW_WINDOW_SIZE
#define OPTICFLOW_WINDOW_SIZE 10
#endif
PRINT_CONFIG_VAR(OPTICFLOW_WINDOW_SIZE);
#ifndef OPTICFLOW_SUBPIXEL_FACTOR
#define OPTICFLOW_SUBPIXEL_FACTOR 10
#endif
PRINT_CONFIG_VAR(OPTICFLOW_SUBPIXEL_FACTOR);
#ifndef OPTICFLOW_MAX_ITERATIONS
#define OPTICFLOW_MAX_ITERATIONS 10
#endif
PRINT_CONFIG_VAR(OPTICFLOW_MAX_ITERATIONS);
#ifndef OPTICFLOW_THRESHOLD_VEC
#define OPTICFLOW_THRESHOLD_VEC 2
#endif
PRINT_CONFIG_VAR(OPTICFLOW_THRESHOLD_VEC);
#ifndef OPTICFLOW_FAST9_ADAPTIVE
#define OPTICFLOW_FAST9_ADAPTIVE TRUE
#endif
PRINT_CONFIG_VAR(OPTICFLOW_FAST9_ADAPTIVE);
#ifndef OPTICFLOW_FAST9_THRESHOLD
#define OPTICFLOW_FAST9_THRESHOLD 20
#endif
PRINT_CONFIG_VAR(OPTICFLOW_FAST9_THRESHOLD);
#ifndef OPTICFLOW_FAST9_MIN_DISTANCE
#define OPTICFLOW_FAST9_MIN_DISTANCE 10
#endif
PRINT_CONFIG_VAR(OPTICFLOW_FAST9_MIN_DISTANCE);
/* Functions only used here */
static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime);
static int cmp_flow(const void *a, const void *b);
/**
 * Initialize the opticflow calculator
 * @param[out] *opticflow The new optical flow calculator
 * @param[in] w The image width in pixels
 * @param[in] h The image height in pixels
 */
void opticflow_calc_init(struct opticflow_t *opticflow, uint16_t w, uint16_t h)
{
  /* Create the grayscale image buffers for the current and previous frame */
  image_create(&opticflow->img_gray, w, h, IMAGE_GRAYSCALE);
  image_create(&opticflow->prev_img_gray, w, h, IMAGE_GRAYSCALE);

  /* Set the previous values */
  opticflow->got_first_img = FALSE;
  opticflow->prev_phi = 0.0;
  opticflow->prev_theta = 0.0;

  /* Set the default values (all overridable at compile time) */
  opticflow->max_track_corners = OPTICFLOW_MAX_TRACK_CORNERS;
  opticflow->window_size = OPTICFLOW_WINDOW_SIZE;
  opticflow->subpixel_factor = OPTICFLOW_SUBPIXEL_FACTOR;
  opticflow->max_iterations = OPTICFLOW_MAX_ITERATIONS;
  opticflow->threshold_vec = OPTICFLOW_THRESHOLD_VEC;
  opticflow->fast9_adaptive = OPTICFLOW_FAST9_ADAPTIVE;
  opticflow->fast9_threshold = OPTICFLOW_FAST9_THRESHOLD;
  opticflow->fast9_min_distance = OPTICFLOW_FAST9_MIN_DISTANCE;
  // NOTE(review): prev_timestamp is not initialized here, so the first
  // opticflow_calc_frame() call computes a meaningless FPS — confirm intended.
}
/**
* Run the optical flow on a new image frame
* @param[in] *opticflow The opticalflow structure that keeps track of previous images
* @param[in] *state The state of the drone
* @param[in] *img The image frame to calculate the optical flow from
* @param[out] *result The optical flow result
*/
void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img, struct opticflow_result_t *result)
{
  // Update FPS for information
  // NOTE(review): divides by the elapsed time in seconds; if two frames carry
  // the same millisecond timestamp this divides by zero — confirm upstream
  // frame rate makes that impossible.
  result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.);
  memcpy(&opticflow->prev_timestamp, &img->ts, sizeof(struct timeval));

  // Convert image to grayscale
  image_to_grayscale(img, &opticflow->img_gray);

  // On the very first frame there is no previous image yet: use a copy of the
  // current frame so the flow below evaluates to zero.
  if (!opticflow->got_first_img) {
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    opticflow->got_first_img = TRUE;
  }

  // *************************************************************************************
  // Corner detection
  // *************************************************************************************
  // FAST corner detection (TODO: non fixed threashold)
  struct point_t *corners = fast9_detect(img, opticflow->fast9_threshold, opticflow->fast9_min_distance,
                                         20, 20, &result->corner_cnt);

  // Adaptive threshold: steer the corner count into the 40..50 band by
  // nudging the FAST9 threshold (clamped to [5, 60]) for the NEXT frame
  if (opticflow->fast9_adaptive) {
    // Decrease and increase the threshold based on previous values
    if (result->corner_cnt < 40 && opticflow->fast9_threshold > 5) {
      opticflow->fast9_threshold--;
    } else if (result->corner_cnt > 50 && opticflow->fast9_threshold < 60) {
      opticflow->fast9_threshold++;
    }
  }

#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_CORNERS
  image_show_points(img, corners, result->corner_cnt);
#endif

  // Check if we found some corners to track; otherwise keep the current frame
  // as the new "previous" frame and bail out early
  if (result->corner_cnt < 1) {
    free(corners);
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    return;
  }

  // *************************************************************************************
  // Corner Tracking
  // *************************************************************************************
  // Execute a Lucas Kanade optical flow; tracked_cnt is updated in place to
  // the number of successfully tracked corners
  result->tracked_cnt = result->corner_cnt;
  struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, corners, &result->tracked_cnt,
                                       opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
                                       opticflow->threshold_vec, opticflow->max_track_corners);

#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_FLOW
  image_show_flow(img, vectors, result->tracked_cnt, opticflow->subpixel_factor);
#endif

  // Get the median flow: sort by flow magnitude, then average the 3 vectors
  // around the median (or take the single median for <= 3 vectors)
  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
  if (result->tracked_cnt == 0) {
    // We got no flow
    result->flow_x = 0;
    result->flow_y = 0;
  } else if (result->tracked_cnt > 3) {
    // Take the average of the 3 median points
    result->flow_x = vectors[result->tracked_cnt / 2 - 1].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2 - 1].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2 + 1].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2 + 1].flow_y;
    result->flow_x /= 3;
    result->flow_y /= 3;
  } else {
    // Take the median point
    result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
  }

  // Flow Derotation: subtract the apparent flow caused by the attitude change
  // between the previous and current frame (scaled to subpixels)
  float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / OPTICFLOW_FOV_W;
  float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / OPTICFLOW_FOV_H;
  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor;
  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor;
  opticflow->prev_phi = state->phi;
  opticflow->prev_theta = state->theta;

  // Velocity calculation from derotated flow, frame rate and focal length
  result->vel_x = -result->flow_der_x * result->fps / opticflow->subpixel_factor * img->w / OPTICFLOW_FX;
  result->vel_y = result->flow_der_y * result->fps / opticflow->subpixel_factor * img->h / OPTICFLOW_FY;

  // *************************************************************************************
  // Next Loop Preparation
  // *************************************************************************************
  free(corners);
  free(vectors);
  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
}
/**
 * Elapsed time between two timestamps.
 * @param[in] *starttime  The earlier timestamp
 * @param[in] *finishtime The later timestamp
 * @return finishtime - starttime in milliseconds
 */
static uint32_t timeval_diff(struct timeval *starttime, struct timeval *finishtime)
{
  long sec_diff  = finishtime->tv_sec - starttime->tv_sec;
  long usec_diff = finishtime->tv_usec - starttime->tv_usec;
  return (uint32_t)(sec_diff * 1000 + usec_diff / 1000);
}
/**
 * qsort comparator ordering flow vectors by squared flow magnitude.
 * @param[in] *a The first flow vector (a struct flow_t)
 * @param[in] *b The second flow vector (a struct flow_t)
 * @return Negative when a has less flow than b, 0 when equal, positive otherwise
 */
static int cmp_flow(const void *a, const void *b)
{
  const struct flow_t *fa = (const struct flow_t *)a;
  const struct flow_t *fb = (const struct flow_t *)b;
  int mag_a = fa->flow_x * fa->flow_x + fa->flow_y * fa->flow_y;
  int mag_b = fb->flow_x * fb->flow_x + fb->flow_y * fb->flow_y;
  return mag_a - mag_b;
}
@@ -0,0 +1,60 @@
/*
* Copyright (C) 2014 Hann Woei Ho
* 2015 Freek van Tienen <freek.v.tienen@gmail.com>
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/opticflow_calculator.h
* @brief Calculate velocity from optic flow.
*
* Using images from a vertical camera and IMU sensor data.
*/
#ifndef OPTICFLOW_CALCULATOR_H
#define OPTICFLOW_CALCULATOR_H
#include "std.h"
#include "inter_thread_data.h"
#include "lib/vision/image.h"
#include "lib/v4l/v4l2.h"
struct opticflow_t {
bool_t got_first_img; //< If we got a image to work with
float prev_phi; //< Phi from the previous image frame
float prev_theta; //< Theta from the previous image frame
struct image_t img_gray; //< Current gray image frame
struct image_t prev_img_gray; //< Previous gray image frame
struct timeval prev_timestamp; //< Timestamp of the previous frame, used for FPS calculation
uint8_t max_track_corners; //< Maximum amount of corners Lucas Kanade should track
uint16_t window_size; //< Window size of the Lucas Kanade calculation (needs to be even)
uint8_t subpixel_factor; //< The amount of subpixels per pixel
uint8_t max_iterations; //< The maximum amount of iterations the Lucas Kanade algorithm should do
uint8_t threshold_vec; //< The threshold in x, y subpixels which the algorithm should stop
bool_t fast9_adaptive; //< Whether the FAST9 threshold should be adaptive
uint8_t fast9_threshold; //< FAST9 corner detection threshold
uint16_t fast9_min_distance; //< Minimum distance in pixels between corners
};
void opticflow_calc_init(struct opticflow_t *opticflow, uint16_t w, uint16_t h);
void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img, struct opticflow_result_t *result);
#endif /* OPTICFLOW_CALCULATOR_H */
@@ -1,154 +0,0 @@
/*
* Copyright (C) 2015 The Paparazzi Community
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/opticflow_thread.c
*
*/
// Sockets
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "opticflow_thread.h"
/////////////////////////////////////////////////////////////////////////
// COMPUTER VISION THREAD
// Video
#include "v4l/v4l2.h"
#include "resize.h"
// Payload Code
#include "visual_estimator.h"
// Downlink Video
//#define DOWNLINK_VIDEO 1
#ifdef DOWNLINK_VIDEO
#include "encoding/jpeg.h"
#include "encoding/rtp.h"
#endif
#include <stdio.h>
#define DEBUG_INFO(X, ...) ;
static volatile enum{RUN,EXIT} computer_vision_thread_command = RUN; /** request to close: set to 1 */
/** Ask the vision thread to terminate after it finishes the current frame. */
void computervision_thread_request_exit(void) {
  computer_vision_thread_command = EXIT;
}
/**
 * Main loop of the computer-vision thread.
 *
 * Grabs frames from the bottom camera, drains the socketpair for the most
 * recent autopilot state, runs the optic-flow plugin and writes the results
 * back over the socket until computervision_thread_request_exit() is called.
 *
 * @param[in] *args Pointer to the thread-side fd of the socketpair
 * @return Always 0 (NULL)
 */
void *computervision_thread_main(void *args)
{
  int thread_socket = *(int *) args;

  // Local data in/out
  struct CVresults vision_results;
  struct PPRZinfo autopilot_data;

  // Status
  computer_vision_thread_command = RUN;

  /* On ARDrone2:
   * video1 = front camera; video2 = bottom camera
   */
  // Create a V4L2 device
  struct v4l2_device *dev = v4l2_init("/dev/video2", 320, 240, 10);
  if (dev == NULL) {
    printf("Error initialising video\n");
    return 0;
  }

  // Start the streaming on the V4L2 device
  if (!v4l2_start_capture(dev)) {
    printf("Could not start capture\n");
    // Close the device so it is not leaked on the failure path
    v4l2_close(dev);
    return 0;
  }

#ifdef DOWNLINK_VIDEO
  // Video Compression
  uint8_t *jpegbuf = (uint8_t *)malloc(dev->w * dev->h * 2);

  // Network Transmit
  struct UdpSocket *vsock;
  //#define FMS_UNICAST 0
  //#define FMS_BROADCAST 1
  vsock = udp_socket("192.168.1.255", 5000, 5001, FMS_BROADCAST);
#endif

  // First Apply Settings before init
  opticflow_plugin_init(dev->w, dev->h, &vision_results);

  while (computer_vision_thread_command == RUN) {
    // Wait for a new frame
    struct v4l2_img_buf *img = v4l2_image_get(dev);

    // Drain the socket non-blockingly so we use the most recent state info;
    // recv() returning -1 just means the queue is empty
    int bytes_read = sizeof(autopilot_data);
    while (bytes_read == sizeof(autopilot_data)) {
      bytes_read = recv(thread_socket, &autopilot_data, sizeof(autopilot_data), MSG_DONTWAIT);
      if (bytes_read != sizeof(autopilot_data)) {
        if (bytes_read != -1) {
          printf("[thread] Failed to read %d bytes PPRZ info from socket.\n", bytes_read);
        }
      }
    }
    DEBUG_INFO("[thread] Read # %d\n", autopilot_data.cnt);

    // Run Image Processing with image and data and get results
    opticflow_plugin_run(img->buf, &autopilot_data, &vision_results);

    /* Send results to main */
    vision_results.cnt++;
    int bytes_written = write(thread_socket, &vision_results, sizeof(vision_results));
    if (bytes_written != sizeof(vision_results)) {
      perror("[thread] Failed to write to socket.\n");
    }
    DEBUG_INFO("[thread] Write # %d, (bytes %d)\n", vision_results.cnt, bytes_written);

#ifdef DOWNLINK_VIDEO
    // JPEG encode the image:
    uint32_t quality_factor = 10; //20 if no resize,
    uint8_t dri_header = 0;
    uint32_t image_format = FOUR_TWO_TWO; // format (in jpeg.h)
    uint8_t *end = encode_image(img->buf, jpegbuf, quality_factor, image_format, dev->w, dev->h, dri_header);
    uint32_t size = end - (jpegbuf);
    send_rtp_frame(vsock, jpegbuf, size, dev->w, dev->h, 0, quality_factor, dri_header, 0);
#endif

    // Free the image
    v4l2_image_free(dev, img);
  }

  printf("Thread Closed\n");
  v4l2_close(dev);
  return 0;
}
@@ -1,33 +0,0 @@
/*
* Copyright (C) 2015 The Paparazzi Community
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/opticflow_thread.h
* @brief computer vision thread
*
*/
#ifndef OPTICFLOW_THREAD_H
#define OPTICFLOW_THREAD_H
void *computervision_thread_main(void *args); /* computer vision thread: should be given a pointer to a socketpair as argument */
void computervision_thread_request_exit(void);
#endif
@@ -0,0 +1,158 @@
/*
* Copyright (C) 2014 Hann Woei Ho
* 2015 Freek van Tienen <freek.v.tienen@gmail.com>
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/stabilization_opticflow.c
* @brief Optical-flow based control for Linux based systems
*
* Control loops for optic flow based hovering.
* Computes setpoint for the lower level attitude stabilization to control horizontal velocity.
*/
// Own Header
#include "stabilization_opticflow.h"
// Stabilization
#include "firmwares/rotorcraft/stabilization/stabilization_attitude.h"
#include "firmwares/rotorcraft/guidance/guidance_v.h"
#include "autopilot.h"
#include "subsystems/datalink/downlink.h"
#define CMD_OF_SAT 1500 // 40 deg = 2859.1851
#ifndef VISION_PHI_PGAIN
#define VISION_PHI_PGAIN 400
#endif
PRINT_CONFIG_VAR(VISION_PHI_PGAIN);
#ifndef VISION_PHI_IGAIN
#define VISION_PHI_IGAIN 20
#endif
PRINT_CONFIG_VAR(VISION_PHI_IGAIN);
#ifndef VISION_THETA_PGAIN
#define VISION_THETA_PGAIN 400
#endif
PRINT_CONFIG_VAR(VISION_THETA_PGAIN);
#ifndef VISION_THETA_IGAIN
#define VISION_THETA_IGAIN 20
#endif
PRINT_CONFIG_VAR(VISION_THETA_IGAIN);
#ifndef VISION_DESIRED_VX
#define VISION_DESIRED_VX 0
#endif
PRINT_CONFIG_VAR(VISION_DESIRED_VX);
#ifndef VISION_DESIRED_VY
#define VISION_DESIRED_VY 0
#endif
PRINT_CONFIG_VAR(VISION_DESIRED_VY);
/* Check the control gains */
#if (VISION_PHI_PGAIN < 0) || \
(VISION_PHI_IGAIN < 0) || \
(VISION_THETA_PGAIN < 0) || \
(VISION_THETA_IGAIN < 0)
#error "ALL control gains have to be positive!!!"
#endif
/* Initialize the default gains and settings */
struct opticflow_stab_t opticflow_stab = {
.phi_pgain = VISION_PHI_PGAIN,
.phi_igain = VISION_PHI_IGAIN,
.theta_pgain = VISION_THETA_PGAIN,
.theta_igain = VISION_THETA_IGAIN,
.desired_vx = VISION_DESIRED_VX,
.desired_vy = VISION_DESIRED_VY
};
/**
* Horizontal guidance mode enter resets the errors
* and starts the controller.
*/
void guidance_h_module_enter(void)
{
  /* Reset the integrated errors */
  opticflow_stab.err_vx_int = 0;
  opticflow_stab.err_vy_int = 0;

  /* Set roll/pitch to 0 degrees and psi to current heading */
  opticflow_stab.cmd.phi = 0;
  opticflow_stab.cmd.theta = 0;
  opticflow_stab.cmd.psi = stateGetNedToBodyEulers_i()->psi;
}
/**
* Read the RC commands
*/
void guidance_h_module_read_rc(void)
{
  // Intentionally empty for now
  // TODO: change the desired vx/vy
}
/**
* Main guidance loop
* @param[in] in_flight Whether we are in flight or not
*/
void guidance_h_module_run(bool_t in_flight)
{
  /* Update the setpoint with the commands computed by
   * stabilization_opticflow_update() */
  stabilization_attitude_set_rpy_setpoint_i(&opticflow_stab.cmd);

  /* Run the default attitude stabilization */
  stabilization_attitude_run(in_flight);
}
/**
 * Update the controls based on a vision result.
 *
 * PI controller on the horizontal velocity estimated from optic flow:
 * computes the roll/pitch attitude setpoint read by guidance_h_module_run().
 *
 * @param[in] *result The opticflow calculation result used for control
 */
void stabilization_opticflow_update(struct opticflow_result_t *result)
{
  /* Check if we are in the correct AP_MODE before setting commands */
  if (autopilot_mode != AP_MODE_MODULE) {
    return;
  }

  /* Velocity error (desired - measured); zero when no corners were tracked */
  float err_vx = 0;
  float err_vy = 0;
  if (result->tracked_cnt > 0) {
    err_vx = opticflow_stab.desired_vx - result->vel_x;
    err_vy = opticflow_stab.desired_vy - result->vel_y;
  }

  /* Integrate the error and bound the integrators so the integral term alone
   * can never exceed the command saturation (anti-windup; resolves the
   * previous "TODO: bound??"). */
  opticflow_stab.err_vx_int += err_vx / 100;
  opticflow_stab.err_vy_int += err_vy / 100;
  if (opticflow_stab.phi_igain > 0) {
    BoundAbs(opticflow_stab.err_vx_int, CMD_OF_SAT / opticflow_stab.phi_igain);
  }
  if (opticflow_stab.theta_igain > 0) {
    BoundAbs(opticflow_stab.err_vy_int, CMD_OF_SAT / opticflow_stab.theta_igain);
  }

  /* Calculate the commands: roll from the x-velocity error,
   * pitch (negated) from the y-velocity error */
  opticflow_stab.cmd.phi   = opticflow_stab.phi_pgain * err_vx / 100
                             + opticflow_stab.phi_igain * opticflow_stab.err_vx_int;
  opticflow_stab.cmd.theta = -(opticflow_stab.theta_pgain * err_vy / 100
                               + opticflow_stab.theta_igain * opticflow_stab.err_vy_int);

  /* Bound the roll and pitch commands */
  BoundAbs(opticflow_stab.cmd.phi, CMD_OF_SAT);
  BoundAbs(opticflow_stab.cmd.theta, CMD_OF_SAT);
}
@@ -0,0 +1,61 @@
/*
* Copyright (C) 2014 Hann Woei Ho
* 2015 Freek van Tienen <freek.v.tienen@gmail.com>
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/stabilization_opticflow.h
* @brief Optical-flow based control for Linux based systems
*
* Control loops for optic flow based hovering.
* Computes setpoint for the lower level attitude stabilization to control horizontal velocity.
*/
#ifndef CV_STABILIZATION_OPTICFLOW_H_
#define CV_STABILIZATION_OPTICFLOW_H_
#include "std.h"
#include "lib/v4l/v4l2.h"
#include "inter_thread_data.h"
#include "math/pprz_algebra_int.h"
/* The opticflow stabilization */
struct opticflow_stab_t {
int32_t phi_pgain; //< The roll P gain on the err_vx
int32_t phi_igain; //< The roll I gain on the err_vx_int
int32_t theta_pgain; //< The pitch P gain on the err_vy
int32_t theta_igain; //< The pitch I gain on the err_vy_int
float desired_vx; //< The desired velocity in the x direction (cm/s)
float desired_vy; //< The desired velocity in the y direction (cm/s)
float err_vx_int; //< The integrated velocity error in x direction (m/s)
float err_vy_int; //< The integrated velocity error in y direction (m/s)
struct Int32Eulers cmd; //< The commands that are send to the hover loop
};
extern struct opticflow_stab_t opticflow_stab;
// Implement own Horizontal loops
extern void guidance_h_module_enter(void);
extern void guidance_h_module_read_rc(void);
extern void guidance_h_module_run(bool_t in_flight);
// Update the stabilization commands based on a vision result
void stabilization_opticflow_update(struct opticflow_result_t *vision);
#endif /* CV_STABILIZATION_OPTICFLOW_H_ */
@@ -1,283 +0,0 @@
/*
* Copyright (C) 2014 Hann Woei Ho
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/visual_estimator.c
* @brief Estimate velocity from optic flow.
*
* Using sensors from vertical camera and IMU of Parrot AR.Drone 2.0.
*
* Warning: all this code is called form the Vision-Thread: do not access any autopilot data in here.
*/
#include "std.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
// Own Header
#include "visual_estimator.h"
// Computer Vision
#include "opticflow/optic_flow_int.h"
#include "opticflow/fast9/fastRosten.h"
// for FPS
#include "modules/computer_vision/cv/framerate.h"
// Local variables
/**
 * Internal state of the visual estimator.
 * Holds the previous (grayscale) frames needed to compute optic flow
 * between consecutive camera images, plus the attitude of the previous
 * frame used for flow derotation.
 */
struct visual_estimator_struct
{
  // Image size in pixels (fixed at init time)
  unsigned int imgWidth;
  unsigned int imgHeight;
  // Images: prev_frame is YUYV (2 bytes/pixel); the gray buffers are 1 byte/pixel
  uint8_t *prev_frame;
  uint8_t *gray_frame;
  uint8_t *prev_gray_frame;
  // Initialization flag: 1 until the first frame has been seen
  int old_img_init;
  // Attitude of the previous frame, for derotation of the measured flow
  float prev_pitch;
  float prev_roll;
} visual_estimator;
// ARDrone Vertical Camera Parameters
#define FOV_H 0.67020643276
#define FOV_W 0.89360857702
#define Fx_ARdrone 343.1211
#define Fy_ARdrone 348.5053
// Corner Detection
#define MAX_COUNT 100
// Flow Derotation
#define FLOW_DEROTATION
// Called by plugin
/**
 * Initialize the visual estimator.
 * Allocates the frame buffers used for the optic-flow computation and
 * zeroes both the internal state and the result structure.
 * @param[in]  w       image width in pixels
 * @param[in]  h       image height in pixels
 * @param[out] results result structure to initialize
 */
void opticflow_plugin_init(unsigned int w, unsigned int h, struct CVresults *results)
{
  // Initialize variables
  visual_estimator.imgWidth = w;
  visual_estimator.imgHeight = h;
  // YUYV input is 2 bytes per pixel, grayscale buffers are 1 byte per pixel
  visual_estimator.gray_frame = calloc(w * h, sizeof(uint8_t));
  visual_estimator.prev_frame = calloc(w * h * 2, sizeof(uint8_t));
  visual_estimator.prev_gray_frame = calloc(w * h, sizeof(uint8_t));
  // FIX: allocations were previously unchecked; a failed calloc led to a
  // NULL dereference in opticflow_plugin_run (memcpy/CvtYUYV2Gray).
  if (visual_estimator.gray_frame == NULL || visual_estimator.prev_frame == NULL ||
      visual_estimator.prev_gray_frame == NULL) {
    printf("[visual_estimator] Failed to allocate frame buffers (%ux%u)\n", w, h);
  }
  visual_estimator.old_img_init = 1;
  visual_estimator.prev_pitch = 0.0;
  visual_estimator.prev_roll = 0.0;

  // Zero the outgoing result structure field by field
  results->OFx = 0.0;
  results->OFy = 0.0;
  results->dx_sum = 0.0;
  results->dy_sum = 0.0;
  results->diff_roll = 0.0;
  results->diff_pitch = 0.0;
  results->cam_h = 0.0;
  results->Velx = 0.0;
  results->Vely = 0.0;
  results->flow_count = 0;
  results->cnt = 0;
  results->count = 0;

  // Start the FPS measurement
  framerate_init();
}
void opticflow_plugin_run(unsigned char *frame, struct PPRZinfo* info, struct CVresults *results)
{
// Corner Tracking
// Working Variables
int max_count = 25;
int borderx = 24, bordery = 24;
int x[MAX_COUNT], y[MAX_COUNT];
int new_x[MAX_COUNT], new_y[MAX_COUNT];
int status[MAX_COUNT];
int dx[MAX_COUNT], dy[MAX_COUNT];
int w = visual_estimator.imgWidth;
int h = visual_estimator.imgHeight;
// Framerate Measuring
results->FPS = framerate_run();
if (visual_estimator.old_img_init == 1) {
memcpy(visual_estimator.prev_frame, frame, w * h * 2);
CvtYUYV2Gray(visual_estimator.prev_gray_frame, visual_estimator.prev_frame, w, h);
visual_estimator.old_img_init = 0;
}
// *************************************************************************************
// Corner detection
// *************************************************************************************
// FAST corner detection
int fast_threshold = 20;
xyFAST *pnts_fast;
pnts_fast = fast9_detect((const byte *)visual_estimator.prev_gray_frame, w, h, w,
fast_threshold, &results->count);
if (results->count > MAX_COUNT) { results->count = MAX_COUNT; }
for (int i = 0; i < results->count; i++) {
x[i] = pnts_fast[i].x;
y[i] = pnts_fast[i].y;
}
free(pnts_fast);
// Remove neighboring corners
const float min_distance = 3;
float min_distance2 = min_distance * min_distance;
int labelmin[MAX_COUNT];
for (int i = 0; i < results->count; i++) {
for (int j = i + 1; j < results->count; j++) {
// distance squared:
float distance2 = (x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]);
if (distance2 < min_distance2) {
labelmin[i] = 1;
}
}
}
int count_fil = results->count;
for (int i = results->count - 1; i >= 0; i--) {
int remove_point = 0;
if (labelmin[i]) {
remove_point = 1;
}
if (remove_point) {
for (int c = i; c < count_fil - 1; c++) {
x[c] = x[c + 1];
y[c] = y[c + 1];
}
count_fil--;
}
}
if (count_fil > max_count) { count_fil = max_count; }
results->count = count_fil;
// *************************************************************************************
// Corner Tracking
// *************************************************************************************
CvtYUYV2Gray(visual_estimator.gray_frame, frame, w, h);
opticFlowLK(visual_estimator.gray_frame, visual_estimator.prev_gray_frame, x, y,
count_fil, w, h, new_x, new_y, status, 5, 100);
results->flow_count = count_fil;
for (int i = count_fil - 1; i >= 0; i--) {
int remove_point = 1;
if (status[i] && !(new_x[i] < borderx || new_x[i] > (w - 1 - borderx) ||
new_y[i] < bordery || new_y[i] > (h - 1 - bordery))) {
remove_point = 0;
}
if (remove_point) {
for (int c = i; c < results->flow_count - 1; c++) {
x[c] = x[c + 1];
y[c] = y[c + 1];
new_x[c] = new_x[c + 1];
new_y[c] = new_y[c + 1];
}
results->flow_count--;
}
}
results->dx_sum = 0.0;
results->dy_sum = 0.0;
// Optical Flow Computation
for (int i = 0; i < results->flow_count; i++) {
dx[i] = new_x[i] - x[i];
dy[i] = new_y[i] - y[i];
}
// Median Filter
if (results->flow_count) {
quick_sort_int(dx, results->flow_count); // 11
quick_sort_int(dy, results->flow_count); // 11
results->dx_sum = (float) dx[results->flow_count / 2];
results->dy_sum = (float) dy[results->flow_count / 2];
} else {
results->dx_sum = 0.0;
results->dy_sum = 0.0;
}
// Flow Derotation
results->diff_pitch = (info->theta - visual_estimator.prev_pitch) * h / FOV_H;
results->diff_roll = (info->phi - visual_estimator.prev_roll) * w / FOV_W;
visual_estimator.prev_pitch = info->theta;
visual_estimator.prev_roll = info->phi;
float OFx_trans, OFy_trans;
#ifdef FLOW_DEROTATION
if (results->flow_count) {
OFx_trans = results->dx_sum - results->diff_roll;
OFy_trans = results->dy_sum - results->diff_pitch;
if ((OFx_trans <= 0) != (results->dx_sum <= 0)) {
OFx_trans = 0;
OFy_trans = 0;
}
} else {
OFx_trans = results->dx_sum;
OFy_trans = results->dy_sum;
}
#else
OFx_trans = results->dx_sum;
OFy_trans = results->dy_sum;
#endif
// Average Filter
OFfilter(&results->OFx, &results->OFy, OFx_trans, OFy_trans, results->flow_count, 1);
// Velocity Computation
if (info->agl < 0.01) {
results->cam_h = 0.01;
}
else {
results->cam_h = info->agl;
}
if (results->flow_count) {
results->Velx = results->OFy * results->cam_h * results->FPS / Fy_ARdrone + 0.05;
results->Vely = -results->OFx * results->cam_h * results->FPS / Fx_ARdrone - 0.1;
} else {
results->Velx = 0.0;
results->Vely = 0.0;
}
// *************************************************************************************
// Next Loop Preparation
// *************************************************************************************
memcpy(visual_estimator.prev_frame, frame, w * h * 2);
memcpy(visual_estimator.prev_gray_frame, visual_estimator.gray_frame, w * h);
}
@@ -1,41 +0,0 @@
/*
* Copyright (C) 2014 Hann Woei Ho
*
* This file is part of Paparazzi.
*
* Paparazzi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* Paparazzi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Paparazzi; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* @file modules/computer_vision/opticflow/visual_estimator.h
* @brief Estimate velocity from optic flow.
*
* Using sensors from vertical camera and IMU of Parrot AR.Drone 2.0
*/
#ifndef VISUAL_ESTIMATOR_H
#define VISUAL_ESTIMATOR_H
#include "inter_thread_data.h"
/**
 * Initialize the visual estimator.
 * @param w image width in pixels
 * @param h image height in pixels
 * @param results result structure to initialize
 */
void opticflow_plugin_init(unsigned int w, unsigned int h, struct CVresults *results);
void opticflow_plugin_run(unsigned char *frame, struct PPRZinfo* info, struct CVresults* results);
#endif /* VISUAL_ESTIMATOR_H */
@@ -28,120 +28,235 @@
#include "opticflow_module.h"
// Computervision Runs in a thread
#include "opticflow/opticflow_thread.h"
#include "opticflow/inter_thread_data.h"
// Navigate Based On Vision, needed to call init/run_hover_stabilization_onvision
#include "opticflow/hover_stabilization.h"
// Threaded computer vision
#include <pthread.h>
// Sockets
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
int cv_sockets[2];
// Paparazzi Data
#include <pthread.h>
#include "state.h"
#include "subsystems/abi.h"
// Downlink
#include "subsystems/datalink/downlink.h"
#include "lib/v4l/v4l2.h"
#include "lib/encoding/jpeg.h"
#include "lib/encoding/rtp.h"
struct PPRZinfo opticflow_module_data;
/** height above ground level, from ABI
* Used for scale computation, negative value means invalid.
*/
/** default sonar/agl to use in opticflow visual_estimator */
/* default sonar/agl to use in opticflow visual_estimator */
#ifndef OPTICFLOW_AGL_ID
#define OPTICFLOW_AGL_ID ABI_BROADCAST
#endif
abi_event agl_ev;
static void agl_cb(uint8_t sender_id, float distance);
PRINT_CONFIG_VAR(OPTICFLOW_AGL_ID);
static void agl_cb(uint8_t sender_id __attribute__((unused)), float distance)
/* The video device */
#ifndef OPTICFLOW_DEVICE
#define OPTICFLOW_DEVICE /dev/video2
#endif
PRINT_CONFIG_VAR(OPTICFLOW_DEVICE);
/* The video device size (width, height) */
#ifndef OPTICFLOW_DEVICE_SIZE
#define OPTICFLOW_DEVICE_SIZE 320,240
#endif
#define __SIZE_HELPER(x, y) #x", "#y
#define _SIZE_HELPER(x) __SIZE_HELPER(x)
PRINT_CONFIG_MSG("OPTICFLOW_DEVICE_SIZE = " _SIZE_HELPER(OPTICFLOW_DEVICE_SIZE));
/* The video device buffers (the amount of V4L2 buffers) */
#ifndef OPTICFLOW_DEVICE_BUFFERS
#define OPTICFLOW_DEVICE_BUFFERS 15
#endif
PRINT_CONFIG_VAR(VIEWVIDEO_DEVICE_BUFFERS);
/* The main opticflow variables */
struct opticflow_t opticflow;                      ///< Opticflow calculations
static struct opticflow_result_t opticflow_result; ///< The opticflow result
static struct opticflow_state_t opticflow_state;   ///< State of the drone to communicate with the opticflow
static struct v4l2_device *opticflow_dev;          ///< The opticflow camera V4L2 device
static abi_event opticflow_agl_ev;                 ///< The altitude ABI event
static pthread_t opticflow_calc_thread;            ///< The optical flow calculation thread
static bool_t opticflow_got_result;                ///< When we have an optical flow calculation
static pthread_mutex_t opticflow_mutex;            ///< Mutex lock for thread safety

/* Static functions */
static void *opticflow_module_calc(void *data);    ///< The main optical flow calculation thread
static void opticflow_agl_cb(uint8_t sender_id, float distance); ///< Callback function of the ground altitude
#if PERIODIC_TELEMETRY
#include "subsystems/datalink/telemetry.h"
/**
* Send optical flow telemetry information
* @param[in] *trans The transport structure to send the information over
* @param[in] *dev The link to send the data over
*/
static void opticflow_telem_send(struct transport_tx *trans, struct link_device *dev)
{
if (distance > 0) {
opticflow_module_data.agl = distance;
}
pthread_mutex_lock(&opticflow_mutex);
pprz_msg_send_OPTIC_FLOW_EST(trans, dev, AC_ID,
&opticflow_result.fps, &opticflow_result.corner_cnt,
&opticflow_result.tracked_cnt, &opticflow_result.flow_x,
&opticflow_result.flow_y, &opticflow_result.flow_der_x,
&opticflow_result.flow_der_y, &opticflow_result.vel_x,
&opticflow_result.vel_y,
&opticflow_stab.cmd.phi, &opticflow_stab.cmd.theta);
pthread_mutex_unlock(&opticflow_mutex);
}
#endif
#define DEBUG_INFO(X, ...) ;
/**
* Initialize the optical flow module for the bottom camera
*/
void opticflow_module_init(void)
{
// get AGL from sonar via ABI
AbiBindMsgAGL(OPTICFLOW_AGL_ID, &agl_ev, agl_cb);
// Subscribe to the altitude above ground level ABI messages
AbiBindMsgAGL(OPTICFLOW_AGL_ID, &opticflow_agl_ev, opticflow_agl_cb);
// Initialize local data
opticflow_module_data.cnt = 0;
opticflow_module_data.phi = 0;
opticflow_module_data.theta = 0;
opticflow_module_data.agl = 0;
// Set the opticflow state to 0
opticflow_state.phi = 0;
opticflow_state.theta = 0;
opticflow_state.agl = 0;
// Stabilization Code Initialization
init_hover_stabilization_onvision();
// Initialize the opticflow calculation
opticflow_calc_init(&opticflow, 320, 240);
opticflow_got_result = FALSE;
#ifdef OPTICFLOW_SUBDEV
PRINT_CONFIG_MSG("[opticflow_module] Configuring a subdevice!");
PRINT_CONFIG_VAR(OPTICFLOW_SUBDEV);
/* Initialize the V4L2 subdevice (TODO: fix hardcoded path, which and code) */
if (!v4l2_init_subdev(STRINGIFY(OPTICFLOW_SUBDEV), 0, 1, V4L2_MBUS_FMT_UYVY8_2X8, OPTICFLOW_DEVICE_SIZE)) {
printf("[opticflow_module] Could not initialize the %s subdevice.\n", STRINGIFY(OPTICFLOW_SUBDEV));
return;
}
#endif
/* Try to initialize the video device */
opticflow_dev = v4l2_init(STRINGIFY(OPTICFLOW_DEVICE), OPTICFLOW_DEVICE_SIZE, OPTICFLOW_DEVICE_BUFFERS);
if (opticflow_dev == NULL) {
printf("[opticflow_module] Could not initialize the video device\n");
}
#if PERIODIC_TELEMETRY
register_periodic_telemetry(DefaultPeriodic, "OPTIC_FLOW_EST", opticflow_telem_send);
#endif
}
/**
* Update the optical flow state for the calculation thread
* and update the stabilization loops with the newest result
*/
void opticflow_module_run(void)
{
pthread_mutex_lock(&opticflow_mutex);
// Send Updated data to thread
opticflow_module_data.cnt++;
opticflow_module_data.phi = stateGetNedToBodyEulers_f()->phi;
opticflow_module_data.theta = stateGetNedToBodyEulers_f()->theta;
int bytes_written = write(cv_sockets[0], &opticflow_module_data, sizeof(opticflow_module_data));
if (bytes_written != sizeof(opticflow_module_data) && errno !=4){
printf("[module] Failed to write to socket: written = %d, error=%d, %s.\n",bytes_written, errno, strerror(errno));
}
else {
DEBUG_INFO("[module] Write # %d (%d bytes)\n",opticflow_module_data.cnt, bytes_written);
}
opticflow_state.phi = stateGetNedToBodyEulers_f()->phi;
opticflow_state.theta = stateGetNedToBodyEulers_f()->theta;
// Read Latest Vision Module Results
struct CVresults vision_results;
// Warning: if the vision runs faster than the module, you need to read multiple times
int bytes_read = recv(cv_sockets[0], &vision_results, sizeof(vision_results), MSG_DONTWAIT);
if (bytes_read != sizeof(vision_results)) {
if (bytes_read != -1) {
printf("[module] Failed to read %d bytes: CV results from socket errno=%d.\n",bytes_read, errno);
}
} else {
////////////////////////////////////////////
// Module-Side Code
////////////////////////////////////////////
DEBUG_INFO("[module] Read vision %d\n",vision_results.cnt);
run_hover_stabilization_onvision(&vision_results);
// Update the stabilization loops on the current calculation
if (opticflow_got_result) {
stabilization_opticflow_update(&opticflow_result);
opticflow_got_result = FALSE;
}
pthread_mutex_unlock(&opticflow_mutex);
}
/**
* Start the optical flow calculation
*/
void opticflow_module_start(void)
{
pthread_t computervision_thread;
if (socketpair(AF_UNIX, SOCK_DGRAM, 0, cv_sockets) == 0) {
////////////////////////////////////////////
// Thread-Side Code
////////////////////////////////////////////
int rc = pthread_create(&computervision_thread, NULL, computervision_thread_main,
&cv_sockets[1]);
if (rc) {
printf("ctl_Init: Return code from pthread_create(mot_thread) is %d\n", rc);
}
// Check if we are not already running
if (opticflow_calc_thread != 0) {
printf("[opticflow_module] Opticflow already started!\n");
return;
}
else {
perror("Could not create socket.\n");
// Create the opticalflow calculation thread
int rc = pthread_create(&opticflow_calc_thread, NULL, opticflow_module_calc, NULL);
if (rc) {
printf("[opticflow_module] Could not initialize opticflow thread (return code: %d)\n", rc);
}
}
/**
* Stop the optical flow calculation
*/
void opticflow_module_stop(void)
{
computervision_thread_request_exit();
// Stop the capturing
v4l2_stop_capture(opticflow_dev);
// TODO: fix thread stop
}
/**
* The main optical flow calculation thread
* This thread passes the images trough the optical flow
* calculator based on Lucas Kanade
*/
#include "errno.h"
/**
 * The main optical flow calculation thread.
 * Grabs images from the V4L2 device in a blocking loop, copies the latest
 * autopilot state under the mutex, runs the optic flow calculation on the
 * frame, and publishes the result under the mutex for the periodic code.
 * @param data unused thread argument
 * @return 0 when the camera capture could not be started; otherwise never returns
 */
static void *opticflow_module_calc(void *data __attribute__((unused)))
{
  // Start the streaming on the V4L2 device
  if (!v4l2_start_capture(opticflow_dev)) {
    printf("[opticflow_module] Could not start capture of the camera\n");
    return 0;
  }

#if OPTICFLOW_DEBUG
  // Create a new JPEG image (reused for every debug frame)
  struct image_t img_jpeg;
  image_create(&img_jpeg, opticflow_dev->w, opticflow_dev->h, IMAGE_JPEG);
#endif

  /* Main loop of the optical flow calculation */
  // NOTE(review): this loop never terminates, so the image_free() below is
  // unreachable — matches the "TODO: fix thread stop" in opticflow_module_stop().
  while (TRUE) {
    // Try to fetch an image (blocking)
    struct image_t img;
    v4l2_image_get(opticflow_dev, &img);

    // Copy the state: take a local snapshot so the calculation runs lock-free
    pthread_mutex_lock(&opticflow_mutex);
    struct opticflow_state_t temp_state;
    memcpy(&temp_state, &opticflow_state, sizeof(struct opticflow_state_t));
    pthread_mutex_unlock(&opticflow_mutex);

    // Do the optical flow calculation
    struct opticflow_result_t temp_result;
    opticflow_calc_frame(&opticflow, &temp_state, &img, &temp_result);

    // Copy the result if finished and flag it for the periodic module code
    pthread_mutex_lock(&opticflow_mutex);
    memcpy(&opticflow_result, &temp_result, sizeof(struct opticflow_result_t));
    opticflow_got_result = TRUE;
    pthread_mutex_unlock(&opticflow_mutex);

#if OPTICFLOW_DEBUG
    // Stream the (JPEG-encoded) frame for debugging
    jpeg_encode_image(&img, &img_jpeg, 70, FALSE);
    rtp_frame_send(
      &VIEWVIDEO_DEV,           // UDP device
      &img_jpeg,
      0,                        // Format 422
      70,                       // Jpeg-Quality
      0,                        // DRI Header
      0                         // 90kHz time increment
    );
#endif

    // Free the image
    v4l2_image_free(opticflow_dev, &img);
  }

#if OPTICFLOW_DEBUG
  image_free(&img_jpeg);
#endif
}
/**
* Get the altitude above ground of the drone
* @param[in] sender_id The id that send the ABI message (unused)
* @param[in] distance The distance above ground level in meters
*/
static void opticflow_agl_cb(uint8_t sender_id __attribute__((unused)), float distance)
{
  // Ignore invalid (non-positive) measurements; keep the last good altitude.
  if (distance <= 0) {
    return;
  }
  opticflow_state.agl = distance;
}
@@ -28,7 +28,12 @@
#ifndef OPTICFLOW_MODULE_H
#define OPTICFLOW_MODULE_H
#include "std.h"
// Include opticflow calculator and stabilization loops
#include "opticflow/opticflow_calculator.h"
#include "opticflow/stabilization_opticflow.h"
// Needed for settings
extern struct opticflow_t opticflow;
// Module functions
extern void opticflow_module_init(void);
+29 -41
View File
@@ -42,9 +42,9 @@
// Video
#include "lib/v4l/v4l2.h"
#include "cv/resize.h"
#include "cv/encoding/jpeg.h"
#include "cv/encoding/rtp.h"
#include "lib/vision/image.h"
#include "lib/encoding/jpeg.h"
#include "lib/encoding/rtp.h"
// Threaded computer vision
#include <pthread.h>
@@ -142,19 +142,17 @@ static void *viewvideo_thread(void *data __attribute__((unused)))
}
// Resize image if needed
struct img_struct small;
small.w = viewvideo.dev->w / viewvideo.downsize_factor;
small.h = viewvideo.dev->h / viewvideo.downsize_factor;
if (viewvideo.downsize_factor != 1) {
small.buf = (uint8_t *)malloc(small.w * small.h * 2);
} else {
small.buf = NULL;
}
struct image_t img_small;
image_create(&img_small,
viewvideo.dev->w / viewvideo.downsize_factor,
viewvideo.dev->h / viewvideo.downsize_factor,
IMAGE_YUV422);
// JPEG compression (8.25 bits are required for a 100% quality image, margin of ~0.55)
uint8_t *jpegbuf = (uint8_t *)malloc(ceil(small.w * small.h * 1.1));
// Create the JPEG encoded image
struct image_t img_jpeg;
image_create(&img_jpeg, img_small.w, img_small.h, IMAGE_JPEG);
// time
// Initialize timing
uint32_t microsleep = (uint32_t)(1000000. / (float)viewvideo.fps);
struct timeval last_time;
gettimeofday(&last_time, NULL);
@@ -175,14 +173,15 @@ static void *viewvideo_thread(void *data __attribute__((unused)))
last_time = vision_thread_sleep_time;
// Wait for a new frame (blocking)
struct v4l2_img_buf *img = v4l2_image_get(viewvideo.dev);
struct image_t img;
v4l2_image_get(viewvideo.dev, &img);
// Check if we need to take a shot
if (viewvideo.take_shot) {
// Create a high quality image (99% JPEG encoded)
uint8_t *jpegbuf_hr = (uint8_t *)malloc(ceil(viewvideo.dev->w * viewvideo.dev->h * 1.1));
uint8_t *end = jpeg_encode_image(img->buf, jpegbuf_hr, 99, FOUR_TWO_TWO, viewvideo.dev->w, viewvideo.dev->h, TRUE);
uint32_t size = end - (jpegbuf_hr);
struct image_t jpeg_hr;
image_create(&jpeg_hr, img.w, img.h, IMAGE_JPEG);
jpeg_encode_image(&img, &jpeg_hr, 99, TRUE);
// Search for a file where we can write to
char save_name[128];
@@ -195,7 +194,7 @@ static void *viewvideo_thread(void *data __attribute__((unused)))
printf("[viewvideo-thread] Could not write shot %s.\n", save_name);
} else {
// Save it to the file and close it
fwrite(jpegbuf_hr, sizeof(uint8_t), size, fp);
fwrite(jpeg_hr.buf, sizeof(uint8_t), jpeg_hr.buf_size, fp);
fclose(fp);
}
@@ -205,33 +204,25 @@ static void *viewvideo_thread(void *data __attribute__((unused)))
}
// We finished the shot
free(jpegbuf_hr);
image_free(&jpeg_hr);
viewvideo.take_shot = FALSE;
}
// Only resize when needed
if (viewvideo.downsize_factor != 1) {
struct img_struct input;
input.buf = img->buf;
input.w = viewvideo.dev->w;
input.h = viewvideo.dev->h;
resize_uyuv(&input, &small, viewvideo.downsize_factor);
image_yuv422_downsample(&img, &img_small, viewvideo.downsize_factor);
jpeg_encode_image(&img_small, &img_jpeg, VIEWVIDEO_QUALITY_FACTOR, VIEWVIDEO_USE_NETCAT);
} else {
small.buf = img->buf;
jpeg_encode_image(&img, &img_jpeg, VIEWVIDEO_QUALITY_FACTOR, VIEWVIDEO_USE_NETCAT);
}
// JPEG encode the image:
uint8_t *end = jpeg_encode_image(small.buf, jpegbuf, VIEWVIDEO_QUALITY_FACTOR, FOUR_TWO_TWO, small.w, small.h, VIEWVIDEO_USE_NETCAT);
uint32_t size = end - (jpegbuf);
#if VIEWVIDEO_USE_NETCAT
// Open process to send using netcat (in a fork because sometimes kills itself???)
pid_t pid = fork();
if(pid < 0) {
if (pid < 0) {
printf("[viewvideo] Could not create netcat fork.\n");
}
else if(pid ==0) {
} else if (pid == 0) {
// We are the child and want to send the image
FILE *netcat = popen(nc_cmd, "w");
if (netcat != NULL) {
@@ -243,8 +234,7 @@ static void *viewvideo_thread(void *data __attribute__((unused)))
// Exit the program since we don't want to continue after transmitting
exit(0);
}
else {
} else {
// We want to wait until the child is finished
wait(NULL);
}
@@ -252,8 +242,7 @@ static void *viewvideo_thread(void *data __attribute__((unused)))
// Send image with RTP
rtp_frame_send(
&VIEWVIDEO_DEV, // UDP device
jpegbuf, size, // JPEG
small.w, small.h, // Img Size
&img_jpeg,
0, // Format 422
VIEWVIDEO_QUALITY_FACTOR, // Jpeg-Quality
0, // DRI Header
@@ -270,13 +259,12 @@ static void *viewvideo_thread(void *data __attribute__((unused)))
#endif
// Free the image
v4l2_image_free(viewvideo.dev, img);
v4l2_image_free(viewvideo.dev, &img);
}
// Free all buffers
free(jpegbuf);
if (viewvideo.downsize_factor != 1)
free(small.buf);
image_free(&img_jpeg);
image_free(&img_small);
return 0;
}