Multi-EKF support (ekf2)

- ekf2 can now run in multi-instance mode (currently up to 9 instances)
    - in multi mode all estimates are published to alternate topics (e.g. estimator_attitude instead of vehicle_attitude)
 - a new ekf2 selector runs in multi-instance mode to monitor and compare all instances, selecting a primary (e.g. N x estimator_attitude => vehicle_attitude)
 - the sensors module's accel & gyro inconsistency checks are now relative to the mean of all instances, rather than to the current primary (when active, the ekf2 selector is responsible for choosing the primary accel & gyro)
 - existing consumers of estimator_status must check estimator_selector_status to select the current primary instance's status
 - ekf2 single-instance mode is still fully supported and remains the default

Co-authored-by: Paul Riseborough <gncsolns@gmail.com>
This commit is contained in:
Daniel Agar
2020-10-27 10:56:11 -04:00
committed by GitHub
parent d5245a22d3
commit 0f411d6820
56 changed files with 1747 additions and 295 deletions

View File

@@ -14,13 +14,14 @@ from analysis.checks import perform_ecl_ekf_checks
from analysis.post_processing import get_estimator_check_flags
def analyse_ekf(
ulog: ULog, check_levels: Dict[str, float], red_thresh: float = 1.0,
amb_thresh: float = 0.5, min_flight_duration_seconds: float = 5.0,
ulog: ULog, check_levels: Dict[str, float], multi_instance: int = 0,
red_thresh: float = 1.0, amb_thresh: float = 0.5, min_flight_duration_seconds: float = 5.0,
in_air_margin_seconds: float = 5.0, pos_checks_when_sensors_not_fused: bool = False) -> \
Tuple[str, Dict[str, str], Dict[str, float], Dict[str, float]]:
"""
:param ulog:
:param check_levels:
:param multi_instance:
:param red_thresh:
:param amb_thresh:
:param min_flight_duration_seconds:
@@ -30,22 +31,19 @@ def analyse_ekf(
"""
try:
estimator_states = ulog.get_dataset('estimator_states').data
print('found estimator_states data')
estimator_states = ulog.get_dataset('estimator_states', multi_instance).data
except:
raise PreconditionError('could not find estimator_states data')
raise PreconditionError('could not find estimator_states instance', multi_instance)
try:
estimator_status = ulog.get_dataset('estimator_status').data
print('found estimator_status data')
estimator_status = ulog.get_dataset('estimator_status', multi_instance).data
except:
raise PreconditionError('could not find estimator_status data')
raise PreconditionError('could not find estimator_status instance', multi_instance)
try:
_ = ulog.get_dataset('estimator_innovations').data
print('found estimator_innovations data')
_ = ulog.get_dataset('estimator_innovations', multi_instance).data
except:
raise PreconditionError('could not find estimator_innovations data')
raise PreconditionError('could not find estimator_innovations instance', multi_instance)
try:
in_air = InAirDetector(
@@ -71,7 +69,7 @@ def analyse_ekf(
metrics = calculate_ecl_ekf_metrics(
ulog, innov_flags, innov_fail_checks, sensor_checks, in_air, in_air_no_ground_effects,
red_thresh=red_thresh, amb_thresh=amb_thresh)
multi_instance, red_thresh=red_thresh, amb_thresh=amb_thresh)
check_status, master_status = perform_ecl_ekf_checks(
metrics, sensor_checks, innov_fail_checks, check_levels)

View File

@@ -13,7 +13,7 @@ from analysis.detectors import InAirDetector
def calculate_ecl_ekf_metrics(
ulog: ULog, innov_flags: Dict[str, float], innov_fail_checks: List[str],
sensor_checks: List[str], in_air: InAirDetector, in_air_no_ground_effects: InAirDetector,
red_thresh: float = 1.0, amb_thresh: float = 0.5) -> Tuple[dict, dict, dict, dict]:
multi_instance: int = 0, red_thresh: float = 1.0, amb_thresh: float = 0.5) -> Tuple[dict, dict, dict, dict]:
sensor_metrics = calculate_sensor_metrics(
ulog, sensor_checks, in_air, in_air_no_ground_effects,
@@ -22,9 +22,9 @@ def calculate_ecl_ekf_metrics(
innov_fail_metrics = calculate_innov_fail_metrics(
innov_flags, innov_fail_checks, in_air, in_air_no_ground_effects)
imu_metrics = calculate_imu_metrics(ulog, in_air_no_ground_effects)
imu_metrics = calculate_imu_metrics(ulog, multi_instance, in_air_no_ground_effects)
estimator_status_data = ulog.get_dataset('estimator_status').data
estimator_status_data = ulog.get_dataset('estimator_status', multi_instance).data
# Check for internal filter numerical faults
ekf_metrics = {'filter_faults_max': np.amax(estimator_status_data['filter_fault_flags'])}
@@ -44,10 +44,10 @@ def calculate_ecl_ekf_metrics(
def calculate_sensor_metrics(
ulog: ULog, sensor_checks: List[str], in_air: InAirDetector,
in_air_no_ground_effects: InAirDetector, red_thresh: float = 1.0,
amb_thresh: float = 0.5) -> Dict[str, float]:
in_air_no_ground_effects: InAirDetector, multi_instance: int = 0,
red_thresh: float = 1.0, amb_thresh: float = 0.5) -> Dict[str, float]:
estimator_status_data = ulog.get_dataset('estimator_status').data
estimator_status_data = ulog.get_dataset('estimator_status', multi_instance).data
sensor_metrics = dict()
@@ -131,10 +131,9 @@ def calculate_innov_fail_metrics(
return innov_fail_metrics
def calculate_imu_metrics(
ulog: ULog, in_air_no_ground_effects: InAirDetector) -> dict:
def calculate_imu_metrics(ulog: ULog, multi_instance, in_air_no_ground_effects: InAirDetector) -> dict:
estimator_status_data = ulog.get_dataset('estimator_status').data
estimator_status_data = ulog.get_dataset('estimator_status', multi_instance).data
imu_metrics = dict()
@@ -158,7 +157,7 @@ def calculate_imu_metrics(
in_air_no_ground_effects, np.mean)
# IMU bias checks
estimator_states_data = ulog.get_dataset('estimator_states').data
estimator_states_data = ulog.get_dataset('estimator_states', multi_instance).data
imu_metrics['imu_dang_bias_median'] = np.sqrt(np.sum([np.square(calculate_stat_from_signal(
estimator_states_data, 'estimator_states', signal, in_air_no_ground_effects, np.median))

View File

@@ -17,7 +17,7 @@ from plotting.data_plots import TimeSeriesPlot, InnovationPlot, ControlModeSumma
from analysis.detectors import PreconditionError
import analysis.data_version_handler as dvh
def create_pdf_report(ulog: ULog, output_plot_filename: str) -> None:
def create_pdf_report(ulog: ULog, multi_instance: int, output_plot_filename: str) -> None:
"""
creates a pdf report of the ekf analysis.
:param ulog:
@@ -29,20 +29,18 @@ def create_pdf_report(ulog: ULog, output_plot_filename: str) -> None:
# save the plots to PDF
try:
estimator_status = ulog.get_dataset('estimator_status').data
print('found estimator_status data')
estimator_status = ulog.get_dataset('estimator_status', multi_instance).data
except:
raise PreconditionError('could not find estimator_status data')
raise PreconditionError('could not find estimator_status instance', multi_instance)
try:
estimator_states = ulog.get_dataset('estimator_states').data
print('found estimator_states data')
estimator_states = ulog.get_dataset('estimator_states', multi_instance).data
except:
raise PreconditionError('could not find estimator_states data')
raise PreconditionError('could not find estimator_states instance', multi_instance)
try:
estimator_innovations = ulog.get_dataset('estimator_innovations').data
estimator_innovation_variances = ulog.get_dataset('estimator_innovation_variances').data
estimator_innovations = ulog.get_dataset('estimator_innovations', multi_instance).data
estimator_innovation_variances = ulog.get_dataset('estimator_innovation_variances', multi_instance).data
innovation_data = estimator_innovations
for key in estimator_innovation_variances:
# append 'var' to the field name such that we can distinguish between innov and innov_var
@@ -65,7 +63,7 @@ def create_pdf_report(ulog: ULog, output_plot_filename: str) -> None:
for key in innovation_data:
innovation_data[key] = innovation_data[key][0:innovation_data_min_length]
print('found innovation data (merged estimator_innovations + estimator_innovation_variances)')
print('found innovation data (merged estimator_innovations + estimator_innovation_variances) instance', multi_instance)
except:
raise PreconditionError('could not find innovation data')

View File

@@ -90,6 +90,21 @@ def process_logdata_ekf(
except:
raise PreconditionError('could not open {:s}'.format(filename))
ekf_instances = 1
try:
estimator_selector_status = ulog.get_dataset('estimator_selector_status',).data
print('found estimator_selector_status (multi-ekf) data')
for instances_available in estimator_selector_status['instances_available']:
if instances_available > ekf_instances:
ekf_instances = instances_available
print(ekf_instances, 'ekf instances')
except:
print('could not find estimator_selector_status data')
try:
# get the dictionary of fail and warning test thresholds from a csv file
with open(check_level_dict_filename, 'r') as file:
@@ -100,30 +115,35 @@ def process_logdata_ekf(
raise PreconditionError('could not find {:s}'.format(check_level_dict_filename))
in_air_margin = 5.0 if sensor_safety_margins else 0.0
# perform the ekf analysis
master_status, check_status, metrics, airtime_info = analyse_ekf(
ulog, check_levels, red_thresh=1.0, amb_thresh=0.5, min_flight_duration_seconds=5.0,
in_air_margin_seconds=in_air_margin)
test_results = create_results_table(
check_table_filename, master_status, check_status, metrics, airtime_info)
for multi_instance in range(ekf_instances):
# write metadata to a .csv file
with open('{:s}.mdat.csv'.format(filename), "w") as file:
print('\nestimator instance:', multi_instance)
file.write("name,value,description\n")
# perform the ekf analysis
master_status, check_status, metrics, airtime_info = analyse_ekf(
ulog, check_levels, multi_instance, red_thresh=1.0, amb_thresh=0.5, min_flight_duration_seconds=5.0,
in_air_margin_seconds=in_air_margin)
# loop through the test results dictionary and write each entry on a separate row, with data comma separated
# save data in alphabetical order
key_list = list(test_results.keys())
key_list.sort()
for key in key_list:
file.write(key + "," + str(test_results[key][0]) + "," + test_results[key][1] + "\n")
print('Test results written to {:s}.mdat.csv'.format(filename))
test_results = create_results_table(
check_table_filename, master_status, check_status, metrics, airtime_info)
if plot:
create_pdf_report(ulog, '{:s}.pdf'.format(filename))
print('Plots saved to {:s}.pdf'.format(filename))
# write metadata to a .csv file
with open('{:s}-{:d}.mdat.csv'.format(filename, multi_instance), "w") as file:
file.write("name,value,description\n")
# loop through the test results dictionary and write each entry on a separate row, with data comma separated
# save data in alphabetical order
key_list = list(test_results.keys())
key_list.sort()
for key in key_list:
file.write(key + "," + str(test_results[key][0]) + "," + test_results[key][1] + "\n")
print('Test results written to {:s}-{:d}.mdat.csv'.format(filename, multi_instance))
if plot:
create_pdf_report(ulog, multi_instance, '{:s}-{:d}.pdf'.format(filename, multi_instance))
print('Plots saved to {:s}-{:d}.pdf'.format(filename, multi_instance))
return test_results