The Strategic Report on the 2025 World Solar Challenge

Author

The University of Michigan Solar Car Team

Published

September 5, 2024

Abstract
We provide a complete report of the race, route, weather, and vehicle.

Introduction

Import the relevant values:

Code
# Disable pandas warnings
# TODO(agoettsc): come up with a good justification
from warnings import simplefilter
simplefilter(action="ignore", category=FutureWarning)

import pandas as pd
simplefilter(action="ignore", category=pd.errors.PerformanceWarning)

import numpy as np
from numpy.polynomial.polynomial import Polynomial

from tqdm import tqdm

import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset

from scipy.stats import linregress
from scipy.interpolate import interp1d, LinearNDInterpolator, CubicSpline, RegularGridInterpolator

import seaborn

import glob
import tomli
import pvlib
import math
import re

Set up our custom matplotlib theme:

Code
# Custom matplotlib theme: Michigan brand palette, Montserrat type, grid behind data.

MICHIGAN_BLUE = '#00274C'
MICHIGAN_MAIZE = '#FFCB05'

TAPPAN_RED = '#9A3324'
ROSS_ORANGE = '#D86018'
WAVE_FIELD_GREEN = '#A5A508'
A2_AMETHYST = '#702082'
TAUBMAN_TEAL = '#00B2A9'
LAW_QUAD_STONE = '#655A52'

ANGELL_HALL_ASH = '#989C97'

PUMA_BLACK = '#131516'
SOLID_WHITE = '#FFFFFF'

# Register Montserrat (shipped next to this file) ahead of the default fonts.
fe = fm.FontEntry(
       fname='./Montserrat-Regular.ttf',
       name='Montserrat')
fm.fontManager.ttflist.insert(0, fe)

mpl.rcParams.update({
    # near-black (#131516) text and axes on a white face
    'text.color': PUMA_BLACK,
    'axes.labelcolor': PUMA_BLACK,
    'xtick.color': PUMA_BLACK,
    'ytick.color': PUMA_BLACK,
    'axes.edgecolor': PUMA_BLACK,
    'axes.facecolor': SOLID_WHITE,
    # typography: Montserrat everywhere, including mathtext
    'font.family': fe.name,
    'mathtext.fontset': 'custom',
    'mathtext.it': fe.name + ':italic',
    'mathtext.bf': fe.name + ':italic:bold',
    # thicker lines, larger axes text
    'lines.linewidth': 2,
    'axes.labelsize': 12,
    'axes.titlesize': 12,
    # brand color cycle
    'axes.prop_cycle': mpl.cycler(color=[MICHIGAN_BLUE, MICHIGAN_MAIZE, TAPPAN_RED, A2_AMETHYST, ROSS_ORANGE]),
    # grid on, drawn behind the data
    'axes.grid': True,
    'axes.axisbelow': True,
})

def set_yticks_rt_days():
    """Relabel y-axis hour ticks as race-day labels (ticks every 3 h from 25h20m to 49h20m)."""
    # Labels normalized to the 'Nd +Mh' pattern (originals had '5d + 3h' / '5d + 6h'
    # with inconsistent spacing).
    plt.yticks(1/3 + np.arange(25, 50, 3), ['3d +3h', '3d +6h', '4d', '4d +3h', '4d +6h', '5d', '5d +3h', '5d +6h', '6d'])

def set_xticks_race_distance(remaining=False):
    """Set x-axis ticks for race distance in km; invert the axis when plotting distance remaining."""
    RD = 3024.7408  # race distance in km -- should be confirmed
    positions = [0, 500, 1000, 1500, 2000, 2500, RD]
    if remaining:
        plt.gca().invert_xaxis()
        labels = ['End', '500', '1000', '1500', '2000', '2500', 'Start']
    else:
        labels = ['Start', '500', '1000', '1500', '2000', '2500', 'End']
    plt.xticks(positions, labels)

def add_footnote(text):
    """Place a dagger-prefixed footnote just below the lower-left corner of the figure."""
    plt.figtext(0.1, -0.03, f'†{text}',
                horizontalalignment='left', verticalalignment='bottom', fontsize=8)

def add_annotation(text, x, y, x_offset=0, y_offset=0, ha='center', va='center'):
    """Annotate the point (x, y) with a boxed label, offset by (x_offset, y_offset)
    points and connected to the point by an arrow."""
    label_box = dict(fc="white", ec="black", lw=0.5)
    arrow = dict(arrowstyle='->')
    plt.annotate(text, (x, y),
                 textcoords="offset points", xytext=(x_offset, y_offset),
                 ha=ha, va=va, bbox=label_box, arrowprops=arrow, fontsize=8)

def fit_to_gslide(blank=True, half=False):
    """Resize the current figure to fit a Google Slides layout.

    blank=True sizes for a slide with no title bar; half=True uses half width.
    """
    if blank:
        height = 4.64  # full blank slide
    else:
        height = 3.69  # slide with a title bar
    width = 3.74 if half else 9.32
    plt.gcf().set_size_inches(width, height)

Establish relevant constant values:

Code
# Shared target-speed sweep used by the analyses below: 60-130 kph inclusive, 1 kph steps.
SPEED_AXIS = np.linspace(60, 130, 71)

# Overheads in hours charged to the official clock (see get_official_race_time).
# Presumably empirical estimates -- TODO confirm against timing data.
HOT_LAP_DELAY_TIME = 0.258
WASTED_TIME = 0.239

Route Survey

We start with a simple analysis of our route.

Code
# Load the surveyed route and keep only the racing segments.
route = pd.read_csv("./data/route_6544_0m.csv")
race_route = route.loc[route['segment_type'] == 'race']

Route Survey Summary

Parameter Value
Race Distance 3,024,740.80 m
Marshalling Distance 3,357.33 m
Total Distance 3,028,098.13 m
Net Elevation Change -32.646 m
Total Uphill Travel 6359.825 m
Total Downhill Travel 6392.471 m
Number of Control Stops 9
Number of Driver Swap Stops 2
Number of Stop Lights 39
Number of Stop Signs 0
Number of Flashing Yellows 1
Finish Line Speed Limit 60 kph
Longitudinal Distance-Weighted Average \(g\) 9.7891
Uphill Work-Weighted Average \(g\) 9.7885
Downhill Work-Weighted Average \(g\) 9.7890
Minimum Possible Legal Race Time (\(a = \infty\)) 93200.26s = 25:53:20.26
Minimum Reasonable Legal Race Time (\(a = 0.3g\)) TBD
Minimum Reasonable Race Time (\(a = 0.3g\), 2kph speed) TBD

Geographical Analysis

First, we analyze total distances:

Code
# Total segment lengths by type. Note the CSV spells the type 'marshaling'
# (single l), while the variable keeps the report's 'marshalling' spelling.
segment_types = route['segment_type']
race_distance = route.loc[segment_types == 'race', 'distance'].sum()
marshalling_distance = route.loc[segment_types == 'marshaling', 'distance'].sum()
total_distance = race_distance + marshalling_distance

print(f'Race Distance: {race_distance} m')
print(f'Marshaling Distance: {marshalling_distance} m')
print(f'Total Distance: {total_distance} m')
Race Distance: 3023123.67 m
Marshaling Distance: 3357.24 m
Total Distance: 3026480.91 m

Next, we analyze elevation changes:

Code
# Use grade x distance (not raw elevation deltas), since the grade data is more reliable.
# Work on an explicit copy: the original assigned a new column into a slice of
# `route`, which raises SettingWithCopyWarning and is not guaranteed to write through.
race_route = race_route.copy()
race_route['elevation_change'] = race_route['grade'] * race_route['distance']
net_elevation_change = race_route['elevation_change'].sum()
total_uphill_travel = race_route[race_route['grade'] > 0]['elevation_change'].sum()
# NOTE(review): this sum is negative (it is a descent), while the summary table
# reports "Total Downhill Travel" as a positive number -- confirm sign convention.
total_downhill_travel = race_route[race_route['grade'] < 0]['elevation_change'].sum()

print(f'Net Elevation Change: {net_elevation_change} m')
print(f'Total Uphill Travel: {total_uphill_travel} m')
print(f'Total Downhill Travel: {total_downhill_travel} m')

# Elevation profile over cumulative distance (km).
plt.plot(route['distance'].cumsum() / 1000, route['elevation'])

plt.xlabel('Distance Traveled (km)')
plt.ylabel('Elevation (m)')
plt.title('Elevation vs Distance')
plt.xlim([0, total_distance / 1000])
plt.ylim([0, 800])

# annotate peak elevation
peak_elevation = route['elevation'].max()
peak_elevation_distance = route['distance'].cumsum()[route['elevation'].idxmax()] / 1000

add_annotation(f'Peak Elevation: {round(peak_elevation,1)} m\nat {round(peak_elevation_distance)} km',
               peak_elevation_distance, peak_elevation, x_offset=20, y_offset=0, ha='left', va='center')

plt.show()
Net Elevation Change: -17.965383836 m
Total Uphill Travel: 6337.844659619 m
Total Downhill Travel: -6355.810043455 m

We should also plot elevation when accounting for the curvature of the Earth.

Features Analysis

We count the number of stop lights, stop signs, and flashing yellows:

Code
# Count route features by the condition that terminates each race segment.
def _count_end_condition(condition):
    # number of race segments ending with the given condition
    return (race_route['segment_end_condition'] == condition).sum()

num_stop_lights = _count_end_condition('traffic_light')
num_stop_signs = _count_end_condition('red_sign')
num_yield_signs = _count_end_condition('yield_sign_or_blinking_yellow')

print(f'Number of Stop Lights: {num_stop_lights}')
print(f'Number of Stop Signs: {num_stop_signs}')
print(f'Number of Flashing Yellows: {num_yield_signs}')
Number of Stop Lights: 39
Number of Stop Signs: 0
Number of Flashing Yellows: 1

Gravity Analysis

First, plot gravity over distance.

Code
# Local gravitational acceleration along the route, against cumulative distance.
ax = plt.gca()
ax.plot(route['distance'].cumsum() / 1000, route['gravity'])

ax.set_xlabel('Distance Traveled (km)')
ax.set_ylabel('Acceleration due to Gravity (m/s/s)')
ax.set_title('Acceleration due to Gravity vs Distance')
ax.set_xlim([0, total_distance / 1000])
set_xticks_race_distance()
ax.set_ylim([9.782, 9.798])
plt.show()

Now, take weighted averages of gravity:

Code
# Average g three ways: weighted by distance, by uphill travel, and by downhill travel.
distance_weighted_avg_g = np.average(route['gravity'], weights=route['distance'])

# uphill travel: vertical meters gained per segment (zero where the road descends)
uphill_travel = route['grade'] * route['distance'] * (route['grade'] > 0)
uphill_work_weighted_avg_g = np.average(route['gravity'], weights=uphill_travel)

# downhill travel: vertical meters lost per segment (negative weights cancel in the ratio)
downhill_travel = route['grade'] * route['distance'] * (route['grade'] < 0)
downhill_work_weighted_avg_g = np.average(route['gravity'], weights=downhill_travel)

print(f'Distance-Weighted Average g: {distance_weighted_avg_g}')
print(f'Uphill Work-Weighted Average g: {uphill_work_weighted_avg_g}')
print(f'Downhill Work-Weighted Average g: {downhill_work_weighted_avg_g}')
Distance-Weighted Average g: 9.780757340905776
Uphill Work-Weighted Average g: 9.78097156500531
Downhill Work-Weighted Average g: 9.78147959325745

Hill-Climbing Analysis

Plot a distribution of grade weighted by distance:

Code
# Distribution of road grade (%), each sample weighted by its segment length.
ax = plt.gca()
ax.hist(route['grade'] * 100, bins=100, weights=route['distance'], density=True)
ax.set_xlabel('Grade (%)')
ax.set_ylabel('Density')
ax.set_title('Distance-Weighted Grade Distribution')
ax.set_xlim([-6, 6])
plt.show()

Plot a distribution of grade weighted by gravity times distance:

Code
# Same grade distribution, but weighted by gravity x distance (proportional to work).
ax = plt.gca()
ax.hist(route['grade'] * 100, bins=100, weights=route['gravity'] * route['distance'], density=True)
ax.set_xlabel('Grade (%)')
ax.set_ylabel('Gravity * Distance')
ax.set_title('Gravity-times-Distance-Weighted Grade Distribution')
ax.set_xlim([-6, 6])
plt.show()

Determine the minimum and maximum grades:

Code
# Extreme grades on the route (as fractions, not percent).
min_grade, max_grade = route['grade'].min(), route['grade'].max()

print(f'Min Grade: {min_grade}')
print(f'Max Grade: {max_grade}')
Min Grade: -0.055443
Max Grade: 0.052716
Code
# For each route segment, find the steepest uphill grade (%) that still lies
# ahead of it, and how much race distance remains at that point.
largest_incline_left = []
distance_left = []
# NOTE(review): this column is computed but never used below -- the plot is
# titled "Normalized" yet is built from the raw 'grade' column; confirm intent.
route['normalized_grade'] = route['grade'] * route['gravity'] / distance_weighted_avg_g
# index of the segment flagged as the end of the race proper
end_of_race = route[route['segment_end_condition'] == 'endOfRace'].index[0]

# O(n^2) scan: for each segment, the max grade over everything still ahead
# (up to the finish before it, over the remaining marshalling afterwards),
# floored at 0 so downhill-only stretches read as flat.
for i in range(len(route)):
    if i < end_of_race:
        largest_incline_left.append(max(max(route['grade'][i:end_of_race] * 100),0))
        distance_left.append(race_distance - route['distance'][:i].sum())
    else:
        largest_incline_left.append(max(max(route['grade'][i:] * 100),0))
        distance_left.append(race_distance - route['distance'][:i].sum())

# Scatter copies keep only the points where the running maximum changes,
# i.e. the segments that ARE the steepest remaining hill.
largest_incline_left_scatter = largest_incline_left.copy()
distance_left_scatter = distance_left.copy()

for i in range(len(largest_incline_left) - 1):
    if largest_incline_left_scatter[i] == largest_incline_left_scatter[i + 1]:
        largest_incline_left_scatter[i] = None
        distance_left_scatter[i] = None

# Every uphill segment, for the grey "other steep hills" scatter.
# NOTE(review): uses total_distance here but race_distance above -- confirm.
distance_left_other_scatter = []
grade_other_scatter = []

for i in range(len(route)):
    if route['grade'][i] > 0:
        distance_left_other_scatter.append(total_distance - route['distance'][:i].sum())
        grade_other_scatter.append(route['grade'][i] * 100)

# Thin out the "other hills" scatter: whenever two uphill points are within
# 20 km of each other, drop the less steep one. The i -= 1 rewinds so the
# survivor is re-compared against its new neighbor.
i = 0
while i < len(distance_left_other_scatter) - 1:
    if abs(distance_left_other_scatter[i + 1] - distance_left_other_scatter[i]) < 20000:
        # remove less steep point
        if grade_other_scatter[i] < grade_other_scatter[i + 1]:
            del distance_left_other_scatter[i]
            del grade_other_scatter[i]
        else:
            del distance_left_other_scatter[i + 1]
            del grade_other_scatter[i + 1]
        i -= 1
    i += 1

# remove all points that overlap with the steepest hills
ii = [i for i in range(len(distance_left_other_scatter)) if distance_left_other_scatter[i] in distance_left_scatter]
for i in ii:
    distance_left_other_scatter[i] = None
    grade_other_scatter[i] = None

# Drop the None placeholders so each scatter holds only real points.
largest_incline_left_scatter = [x for x in largest_incline_left_scatter if x is not None]
distance_left_scatter = [x for x in distance_left_scatter if x is not None]
grade_other_scatter = [x for x in grade_other_scatter if x is not None]
distance_left_other_scatter = [x for x in distance_left_other_scatter if x is not None]

# Index of the last point before the finish line (distance_left > 0) in each series.
kk = np.where(np.array(distance_left) > 0)[0][-1]
jj = np.where(np.array(distance_left_scatter) > 0)[0][-1]

# Step curve of the steepest remaining grade, plus the two scatters.
plt.plot(np.array(distance_left[:kk]) / 1000, largest_incline_left[:kk], color = '#00274C')
plt.scatter(np.array(distance_left_other_scatter) / 1000, grade_other_scatter, color='#989C97', label='Other Steep Hills', facecolors='none')
plt.scatter(np.array(distance_left_scatter[:jj]) / 1000, largest_incline_left_scatter[:jj], label='Steepest Hills', color = '#00274C')

plt.xlim([0 - 100, total_distance / 1000])
set_xticks_race_distance(remaining=True)
plt.xlabel('Distance Left [km]')

plt.ylabel('Steepest (Normalized) Grade Remaining [%]')
plt.ylim([0, 6])
plt.figtext(0.11, 0.01, f'Normalized to g={round(distance_weighted_avg_g,4)}', horizontalalignment='left', verticalalignment='bottom', fontsize=8)
plt.title('WSC\'s Steepest Hills')


# Annotate the four steepest hills: match each scatter point back to its route
# segment by remaining distance (within 0.5 m) to recover the hill's length.
route_index = route.index[abs(distance_left_scatter[0] - (race_distance - route['distance'].cumsum())) < 0.5]
add_annotation(f'{round(largest_incline_left_scatter[0], 2)}% grade\n{round(distance_left_scatter[0] / 1000)} km left\n{round(route["distance"][route_index].values[0], 1)} m long',
               distance_left_scatter[0] / 1000, largest_incline_left_scatter[0], x_offset=30, y_offset=0, ha='left', va='center')
route_index = route.index[abs(distance_left_scatter[1] - (race_distance - route['distance'].cumsum())) < 0.5]
add_annotation(f'{round(largest_incline_left_scatter[1], 2)}% grade\n{round(distance_left_scatter[1] / 1000)} km left\n{round(route["distance"][route_index].values[0], 1)} m long',
               distance_left_scatter[1] / 1000, largest_incline_left_scatter[1], x_offset=10, y_offset=30, ha='left', va='center')
route_index = route.index[abs(distance_left_scatter[2] - (race_distance - route['distance'].cumsum())) < 0.5]
add_annotation(f'{round(largest_incline_left_scatter[2], 2)}% grade\n{round(distance_left_scatter[2] / 1000)} km left\n{round(route["distance"][route_index].values[0], 1)} m long',
               distance_left_scatter[2] / 1000, largest_incline_left_scatter[2], x_offset=20, y_offset=30, ha='left', va='center')

route_index = route.index[abs(distance_left_scatter[3] - (race_distance - route['distance'].cumsum())) < 0.5]
add_annotation(f'{round(largest_incline_left_scatter[3], 2)}% grade\n{round(distance_left_scatter[3] / 1000)} km left\n{round(route["distance"][route_index].values[0], 1)} m long',
               distance_left_scatter[3] / 1000, largest_incline_left_scatter[3], x_offset=40, y_offset=0, ha='left', va='bottom')

# Print map links to the three steepest hills for manual inspection.
for i in range(3):
    lat = route['start_latitude'][abs(distance_left_scatter[i] - (race_distance - route['distance'].cumsum())) < 0.5].values[0]
    lon = route['start_longitude'][abs(distance_left_scatter[i] - (race_distance - route['distance'].cumsum())) < 0.5].values[0]
    print(f'https://www.google.com/maps/?q={lat},{lon}')


# do a mark inset for last 50 km
# NOTE(review): final_stretch = 0, so the inset actually starts at the finish
# line and covers only the marshalling area; axis here is in meters, not km.
final_stretch = 0
kk = np.where(np.array(distance_left) < final_stretch)[0][0]
ii = np.where(np.array(distance_left_scatter) < final_stretch)[0][0]
inset_ax = plt.axes([1, 0.2, 0.25, 0.25])
plt.plot(np.array(distance_left[kk:]), largest_incline_left[kk:], color = '#00274C')
plt.scatter(np.array(distance_left_scatter[ii:]), largest_incline_left_scatter[ii:], color = '#00274C')

inset_ax.set_xticks([-marshalling_distance, 0], [f'Finish', f'Marshal Pt.\n({round(marshalling_distance)} m)'])
plt.gca().invert_xaxis()

# Annotate the steepest marshalling hill (distance tolerance tightened to 1 cm).
route_index = route.index[abs(distance_left_scatter[-4] - (race_distance - route['distance'].cumsum())) < 0.01]
add_annotation(f'{round(largest_incline_left_scatter[-4], 2)}% grade\n{round(distance_left_scatter[-4] / 1000)} km left\n{round(route["distance"][route_index].values[0], 1)} m long',
               distance_left_scatter[-4] / 1000, largest_incline_left_scatter[-4], x_offset=-20, y_offset=-20, ha='left', va='center')

# set y limits
plt.ylim([0, 6])

plt.title('Hills in Marshalling')

plt.show()
https://www.google.com/maps/?q=-13.5692957,131.4822804
https://www.google.com/maps/?q=-32.478482,137.7515229
https://www.google.com/maps/?q=-33.967054,138.1850414

Velocity Analysis

First, plot speed limit vs distance.

Code
# Posted speed limit (converted from m/s to kph) along the route.
ax = plt.gca()
ax.plot(route['distance'].cumsum() / 1000, route['speed_limit'] * 3.6)

ax.set_xlabel('Distance Traveled (km)')
ax.set_ylabel('Speed Limit (kph)')
ax.set_title('Speed Limit vs Distance')
plt.show()

Second, plot distance-weighted speed limit distribution:

Code
# Distance-weighted distribution of speed limits, bucketed to whole kph values.
ax = plt.gca()
ax.hist((route['speed_limit'] * 3.6).astype(int), bins=101, weights=route['distance'], density=True)
# show the density axis as percentages
ax.yaxis.set_major_formatter(mpl.ticker.PercentFormatter(xmax=1))
ax.set_xlabel('Speed Limit (kph)')
ax.set_ylabel('Density')
ax.set_title('Distance-Weighted Speed Limit Distribution')
ax.set_xlim([20, 140])
ax.set_ylim([0, 0.6])
plt.show()

Now, we calculate the minimum reasonable time to complete the route, assuming bounded acceleration and deceleration rates. We also stop at stop signs and lights.

Code
# Stop durations in seconds. "RULES" values are the mandated minimums; "TRUE"
# values add extra overhead for slowing into / pulling out of the stop
# (presumably empirical -- confirm against timing data).
CONTROL_STOP_RULES_DURATION = 30 * 60
CONTROL_STOP_TRUE_DURATION = CONTROL_STOP_RULES_DURATION + 90
DRIVER_SWAPS_RULES_DURATION = 5 * 60
DRIVER_SWAPS_TRUE_DURATION = DRIVER_SWAPS_RULES_DURATION + 15
RED_SIGN_DURATION = 15
TRAFFIC_LIGHT_DURATION = 60
YIELD_SIGN_DURATION = 10

# NOTE(review): declared but unused by generate_speed_profile below -- confirm intent.
ACCELERATION_TIME_RESOLUTION = 0.1

def generate_speed_profile(route, target_speed_kph, acceleration_mpsps=0.5, deceleration_mpsps=0.5, acceptable_speeding_kph=0.0, hot_lap_delay=0.0):
    """Simulate driving the route and return the resulting speed profile.

    The car starts from rest, accelerates at `acceleration_mpsps`, cruises at
    min(target speed, speed limit + acceptable speeding), stops for control
    stops, driver swaps, stop signs, traffic lights and flashing yellows,
    parks at the end of each 9 h driving day, and crosses the finish at 60 kph.

    Parameters
    ----------
    route : pandas.DataFrame
        Route table with 'distance' (m per segment), 'speed_limit' (m/s) and
        'segment_end_condition' columns.
    target_speed_kph : float
        Desired cruising speed [kph].
    acceleration_mpsps : float
        Acceleration limit [m/s^2].
    deceleration_mpsps : float
        NOTE(review): currently unused -- slowdowns and stops are modeled as
        instantaneous speed changes; confirm whether this is intended.
    acceptable_speeding_kph : float
        Margin above the posted limit we are willing to cruise at [kph].
    hot_lap_delay : float
        Hours already on the clock before the car starts moving.

    Returns
    -------
    (profile, speed_limit_profile, eod_locations)
        profile: DataFrame of breakpoints (distance [m], time [s], speed [m/s],
        target_speed, acceleration, process tag);
        speed_limit_profile: DataFrame of (distance, speed_limit) rows at each
        change of limit;
        eod_locations: distances [m] at which end-of-day stops happened.

    Relies on the module-level `race_distance` for the total route length.
    """
    target_v = target_speed_kph / 3.6
    speeding = acceptable_speeding_kph / 3.6
    a_target = acceleration_mpsps
    da_target = deceleration_mpsps  # kept for interface compatibility; unused (see docstring)

    # single forward profile of breakpoints; rows are appended as we advance
    profile = pd.DataFrame(columns=['distance', 'time', 'speed', 'target_speed', 'acceleration', 'process'])
    # speed limit as a step function of distance (rows only where it changes)
    speed_limit_profile = pd.DataFrame(columns=['distance', 'speed_limit'])
    # distances (m) of each category of mandatory stop
    control_stops = []
    driver_swaps = []
    red_sign_stops = []
    traffic_light_stops = []
    flashing_yellow_stops = []
    # race-clock times (s) at which each driving day ends (every 9 h)
    eod_stop_times = 9 * 3600 * np.arange(1, 10)

    # populate speed_limit_profile, keeping only places where the limit changes
    for i in range(1,len(route)):
        if route['speed_limit'][i] != route['speed_limit'][i - 1]:
            speed_limit_profile = pd.concat([speed_limit_profile, pd.DataFrame({'distance': route['distance'].cumsum()[i], 'speed_limit': route['speed_limit'][i]}, index=[0])], ignore_index=True)
    # assume a 40 kph limit at the very start
    speed_limit_profile = pd.concat([pd.DataFrame({'distance': 0, 'speed_limit': 40 / 3.6}, index=[0]), speed_limit_profile], ignore_index=True)
    speed_limit_profile.sort_values('distance', inplace=True)

    # populate stop points
    for i in range(len(route)):
        if route['segment_end_condition'][i] == 'control_stop':
            control_stops.append(route['distance'].cumsum()[i])
        elif route['segment_end_condition'][i] == 'driver_swap':
            driver_swaps.append(route['distance'].cumsum()[i])
        elif route['segment_end_condition'][i] == 'red_sign':
            red_sign_stops.append(route['distance'].cumsum()[i])
        elif route['segment_end_condition'][i] == 'traffic_light':
            traffic_light_stops.append(route['distance'].cumsum()[i])
        elif route['segment_end_condition'][i] == 'yield_sign_or_blinking_yellow':
            flashing_yellow_stops.append(route['distance'].cumsum()[i])

    # simulation state: at rest on the start line with the hot-lap delay on the clock
    d_traveled = 0 # meters
    v = 0 # speed
    t = hot_lap_delay * 3600 # seconds
    a = 0 # acceleration
    at_stop = False  # NOTE(review): written below but never read
    stop_type = None
    eod_locations = []

    while d_traveled < race_distance:
        # Each iteration advances to the next event:
        # 1. we accelerate to the target speed / speed limit
        # 2. we reach a change in speed limit
        # 3. we reach a spot where we have to stop
        speed_limit = speed_limit_profile['speed_limit'][speed_limit_profile['distance'] <= d_traveled].iloc[-1]
        next_speed_limit_change_d = speed_limit_profile['distance'][speed_limit_profile['distance'] > d_traveled].iloc[0]
        next_control_stop_d = min([x for x in control_stops if x > d_traveled], default=int(1e15))
        next_driver_swap_d = min([x for x in driver_swaps if x > d_traveled], default=int(1e15))
        next_red_sign_d = min([x for x in red_sign_stops if x > d_traveled], default=int(1e15))
        next_traffic_light_d = min([x for x in traffic_light_stops if x > d_traveled], default=int(1e15))
        next_flashing_yellow_d = min([x for x in flashing_yellow_stops if x > d_traveled], default=int(1e15))
        # determine next stop and what type it is
        next_stop_d = min(next_control_stop_d, next_driver_swap_d, next_red_sign_d, next_traffic_light_d, next_flashing_yellow_d)
        if next_stop_d == next_control_stop_d:
            stop_type = 'control_stop'
        elif next_stop_d == next_driver_swap_d:
            stop_type = 'driver_swap'
        elif next_stop_d == next_red_sign_d:
            stop_type = 'red_sign'
        elif next_stop_d == next_traffic_light_d:
            stop_type = 'traffic_light'
        elif next_stop_d == next_flashing_yellow_d:
            stop_type = 'flashing_yellow'
        next_eod_stop_time = min([x for x in eod_stop_times if x > t], default=int(1e15))

        # never exceed the posted limit plus the allowed speeding margin
        actual_target_v = min(target_v, speed_limit + speeding)
        if abs(v - actual_target_v) < 0.1:
            # cruising: hold speed until the next stop, limit change, end of day, or finish
            profile = pd.concat([profile, pd.DataFrame({'distance': d_traveled, 'time': t, 'speed': v, 'target_speed': actual_target_v, 'acceleration': 0, 'process': 'c1'}, index=[0])], ignore_index=True)

            time_to_next_stop = (next_stop_d - d_traveled) / v
            time_to_next_speed_limit_change = (next_speed_limit_change_d - d_traveled) / v
            time_to_end_of_day = next_eod_stop_time - t
            time_to_end = (race_distance - d_traveled) / v

            t_cruise = min(time_to_next_stop, time_to_next_speed_limit_change, time_to_end_of_day, time_to_end)
            next_reason = None
            if t_cruise == time_to_next_stop:
                next_reason = 'stop'
            elif t_cruise == time_to_next_speed_limit_change:
                next_reason = 'speed_limit_change'
            elif t_cruise == time_to_end_of_day:
                next_reason = 'end_of_day'
            elif t_cruise == time_to_end:
                next_reason = 'end'

            d_traveled += v * t_cruise
            t += t_cruise

            profile = pd.concat([profile, pd.DataFrame({'distance': d_traveled, 'time': t, 'speed': v, 'target_speed': actual_target_v, 'acceleration': 0, 'process': 'c2'}, index=[0])], ignore_index=True)

            if next_reason == 'stop':
                # stop instantly and charge the dwell time for this stop type
                v = 0
                if stop_type == 'control_stop':
                    t += CONTROL_STOP_TRUE_DURATION
                elif stop_type == 'driver_swap':
                    t += DRIVER_SWAPS_TRUE_DURATION
                elif stop_type == 'red_sign':
                    t += RED_SIGN_DURATION
                elif stop_type == 'traffic_light':
                    t += TRAFFIC_LIGHT_DURATION
                elif stop_type == 'flashing_yellow':
                    t += YIELD_SIGN_DURATION
            if next_reason == 'end_of_day':
                # park where we are overnight; clock keeps running into the next 9 h window
                v = 0
                eod_locations.append(d_traveled)
            if next_reason == 'end':
                # cross the finish at the 60 kph finish-line speed limit
                v = 60 / 3.6
            profile = pd.concat([profile, pd.DataFrame({'distance': d_traveled, 'time': t, 'speed': v, 'target_speed': actual_target_v, 'acceleration': a_target, 'process': 'c3'}, index=[0])], ignore_index=True)
        elif v < actual_target_v:
            # we need to accelerate, let's start by accelerating up to speed
            a = a_target
            t_to_target = (actual_target_v - v) / a
            d_acc = v * t_to_target + 0.5 * a * t_to_target ** 2
            acc_to = min(next_stop_d, next_speed_limit_change_d, race_distance)
            if d_traveled + d_acc > acc_to: # accelerating past our next stop :(
                profile = pd.concat([profile, pd.DataFrame({'distance': d_traveled, 'time': t, 'speed': v, 'target_speed': actual_target_v, 'acceleration': a, 'process': 'atp1'}, index=[0])], ignore_index=True)
                # we will reach the next point before reaching the target speed:
                # solve d = v*dt + a*dt^2/2 for dt.
                # BUGFIX: the discriminant previously omitted the factor of a
                # (sqrt(v^2 + 2*d) instead of sqrt(v^2 + 2*a*d)), giving wrong
                # times/speeds whenever a != 1 m/s^2.
                delta_t = (-v + np.sqrt(v ** 2 + 2 * a * (acc_to - d_traveled))) / a
                t += delta_t
                v += a * delta_t
                d_traveled = acc_to
                at_stop = True
                profile = pd.concat([profile, pd.DataFrame({'distance': d_traveled, 'time': t, 'speed': v, 'target_speed': actual_target_v, 'acceleration': 0, 'process': 'atp2'}, index=[0])], ignore_index=True)
            else:
                # we're good to reach the target speed
                profile = pd.concat([profile, pd.DataFrame({'distance': d_traveled, 'time': t, 'speed': v, 'target_speed': actual_target_v, 'acceleration': a, 'process': 'af1'}, index=[0])], ignore_index=True)
                t += t_to_target
                v = actual_target_v
                a = 0
                d_traveled += d_acc
                stop_type = None
                profile = pd.concat([profile, pd.DataFrame({'distance': d_traveled, 'time': t, 'speed': v, 'target_speed': actual_target_v, 'acceleration': 0, 'process': 'af2'}, index=[0])], ignore_index=True)
        elif v > actual_target_v:
            # over the (new) limit: drop to the target speed instantaneously
            # (deceleration_mpsps is not applied -- see docstring note)
            profile = pd.concat([profile, pd.DataFrame({'distance': d_traveled, 'time': t, 'speed': v, 'target_speed': actual_target_v, 'acceleration': 0, 'process': 'd1'}, index=[0])], ignore_index=True)
            profile = pd.concat([profile, pd.DataFrame({'distance': d_traveled, 'time': t, 'speed': actual_target_v, 'target_speed': actual_target_v, 'acceleration': 0, 'process': 'd2'}, index=[0])], ignore_index=True)
            v = actual_target_v

    return profile, speed_limit_profile, eod_locations

Now we want to plot race time as a function of target speed.

Code
def get_true_race_time(route, target_speed_kph, acceleration_mpsps=1, deceleration_mpsps=0.5, acceptable_speeding_kph=0, hot_lap_delay=0):
    """Driving time in seconds: total elapsed time minus the credited stop
    durations (9 control stops x 30 min + 2 driver swaps x 10 min) and the
    hot-lap delay.

    NOTE(review): the 10 min/swap credited here differs from
    DRIVER_SWAPS_RULES_DURATION (5 min) -- confirm which the regulations specify.
    """
    profile, _, _ = generate_speed_profile(route, target_speed_kph, acceleration_mpsps, deceleration_mpsps, acceptable_speeding_kph, hot_lap_delay)
    credited_stops_s = (30 * 9 + 2 * 10) * 60
    return profile['time'].iloc[-1] - credited_stops_s - hot_lap_delay * 3600


def get_official_race_time(route, target_speed_kph, acceleration_mpsps=1, deceleration_mpsps=0.5, acceptable_speeding_kph=0, hot_lap_delay=0, wasted_time=0):
    """Return (official_time_s, true_time_s): the official clock adds the
    hot-lap delay and wasted time (both in hours) back onto the true race time."""
    true_race_time = get_true_race_time(route, target_speed_kph, acceleration_mpsps, deceleration_mpsps, acceptable_speeding_kph, hot_lap_delay)
    overhead_s = (hot_lap_delay + wasted_time) * 3600
    return true_race_time + overhead_s, true_race_time

# plot official & true race time vs target speed.
# Reuse the shared SPEED_AXIS sweep (instead of re-spelling linspace(60, 130, 71))
# so these rows stay aligned with the deceleration columns added to
# target_speed_df later.
speeds = SPEED_AXIS
times = np.array([get_official_race_time(route, speed, 1.5, 0.5, 0, HOT_LAP_DELAY_TIME, WASTED_TIME) for speed in speeds])
official_race_times = times[:, 0]
true_race_times = times[:, 1]

# Interpolate to find (a) the target speed that finishes in 4 race days
# (31:19:59, one second under the 31h20m mark) and (b) the race time when
# targeting the 110 kph South Australia speed limit.
f = interp1d(official_race_times, speeds)
target_speed = float(f(31 * 3600 + 19 * 60 + 59))
g = interp1d(speeds, official_race_times)
time_at_110 = float(g(110))

# plot the results
plt.xlabel('Target Speed [kph]')
plt.xlim([60, 130])

plt.ylabel('Official Race Time [h]')
plt.ylim([25 + 1/3, 49 + 1/3])
set_yticks_rt_days()

# guide lines for the 4-day target and the 110 kph limit
plt.plot([60, target_speed, target_speed], [31 + 1/3, 31 + 1/3, 0], '--', color=MICHIGAN_MAIZE)
plt.plot([110, 110], [0, time_at_110 / 3600], '--', color=ANGELL_HALL_ASH)

plt.plot(speeds, official_race_times / 3600, label='Official')
plt.plot(speeds, true_race_times / 3600, label='True', color=ROSS_ORANGE)

plt.title('Official Race Time vs. Target Speed')

add_footnote(f'Given hot lap delay plus wasted time of {round(60 * (HOT_LAP_DELAY_TIME + WASTED_TIME))} min.')

add_annotation(f'To finish in 4 days,\ntarget {round(target_speed, 1)} kph.',
               target_speed, 31 + 1/3, x_offset=0, y_offset=20, ha='left', va='bottom')
add_annotation(f'S. Australia Speed Limit',
               110, time_at_110 / 3600, x_offset=0, y_offset=20, ha='left', va='bottom')

plt.legend()

fit_to_gslide()

# keep the sweep results for later columns (see deceleration analysis)
target_speed_df = pd.DataFrame({'target_speed': speeds, 'true_race_time': true_race_times, 'official_race_time': official_race_times})

# save at 600 dpi for slide export (comment previously said 900, code used 600)
plt.savefig('official_race_time_vs_target_speed', dpi=600)

We also want to calculate how much race time we could save by uniformly speeding:

Code
# Calculate benefits of speeding: at the chosen target speed, how much race time
# does each kph of uniform speeding save?

speeding = [0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5]
times = np.array([get_true_race_time(route, target_speed, 1.5, 0.5, speed, HOT_LAP_DELAY_TIME) for speed in speeding])

# convert to time SAVED relative to the no-speeding baseline
times = times[0] - times

# plot results
plt.xlabel('Speeding [kph]')
plt.ylabel('Race Time Saved [min]')
plt.plot(speeding, times / 60)
# linear fit of time saved vs speeding.
# NOTE(review): the original comment claimed the intercept is forced to 0, but
# linregress fits a free intercept (and the fitted intercept is plotted below).
slope, intercept, r_value, p_value, std_err = linregress(speeding, times)

plt.plot(speeding, [x * slope / 60 + intercept / 60 for x in speeding], '--', color=MICHIGAN_MAIZE)
plt.figtext(0.11, -0.03, f'*At target speed of {round(target_speed,2)} kph. Given hot lap delay plus wasted time of {round(HOT_LAP_DELAY_TIME + WASTED_TIME, 2)} h.',
            horizontalalignment='left', verticalalignment='bottom', fontsize=8)
plt.title('Time Saved by Speeding')
# plt.annotate(f'~{round(slope)} s/kph',
#             (2.5,4),
#             textcoords="offset points", xytext=(30, -30),
#             ha='left', va='bottom',
#             bbox=dict(fc="white", ec="black", lw=0.5),
#             arrowprops=dict(arrowstyle='->'),
#             fontsize=8)
# anchor the slope label at the speeding = 2.5 point (times[-6])
add_annotation(f'~{round(slope)} s/kph',
               2.5, times[-6] / 60, x_offset=20, y_offset=-20, ha='left', va='bottom')
plt.ylim([0, 10])
plt.xlim([0, 5])

We also want to study decelerations:

Code
def characterize_decelerations(route, target_speed_kph, acceleration_mpsps=1, deceleration_mpsps=0.5, acceptable_speeding_kph=0, hot_lap_delay=0, wasted_time=0):
    """Summarize braking events in the speed profile for a target speed.

    Returns a tuple of:
      - the sum of (v_prev^2 - v_next^2) over every decelerating step
        (proportional to kinetic energy shed while braking), and
      - the number of decelerating steps plus one.

    NOTE: `wasted_time` is accepted for signature symmetry with the other
    profile helpers but is not used here. Assumes the profile has a default
    RangeIndex.
    """
    profile, _, _ = generate_speed_profile(
        route, target_speed_kph, acceleration_mpsps, deceleration_mpsps,
        acceptable_speeding_kph, hot_lap_delay)
    speeds = profile['speed']
    lost_speed_squared = 0
    deceleration_count = 1
    for prev_speed, curr_speed in zip(speeds, speeds.iloc[1:]):
        if curr_speed < prev_speed:
            lost_speed_squared += prev_speed ** 2 - curr_speed ** 2
            deceleration_count += 1
    return lost_speed_squared, deceleration_count

# Sweep target speeds and characterize braking at each one.
# NOTE(review): WASTED_TIME is forwarded but characterize_decelerations
# ignores its wasted_time parameter -- confirm intent.
decelerations = np.array([characterize_decelerations(route, speed, 1.5, 0.5, 0, HOT_LAP_DELAY_TIME, WASTED_TIME) for speed in SPEED_AXIS])
squared_speed_sums = np.array([x[0] for x in decelerations])
num_decelerations = np.array([x[1] for x in decelerations])

# add square speed sums and num decelerations to target_speed_df
target_speed_df['squared_speed_sums'] = squared_speed_sums
target_speed_df['num_decelerations'] = num_decelerations

plt.plot(SPEED_AXIS, squared_speed_sums)
plt.xlabel('Target Speed [kph]')
plt.ylabel('Sum of Squared Speeds [(m/s)^2]')
plt.title('Deceleration vs. Target Speed')
add_footnote(f'Given hot lap delay plus wasted time of {round(HOT_LAP_DELAY_TIME + WASTED_TIME, 2)} h.')
# plot polynomial regression
p = Polynomial.fit(SPEED_AXIS, squared_speed_sums, 2)
plt.plot(SPEED_AXIS, p(SPEED_AXIS), '--', color=MICHIGAN_MAIZE)
# Polynomial.fit works in a rescaled domain/window, so p.coef are NOT the
# coefficients of v. convert() maps them back to the unscaled variable before
# they are displayed in the annotation.
unscaled_coef = p.convert().coef
add_annotation(f'{round(unscaled_coef[2])}v^2 + {round(unscaled_coef[1])}v + {round(unscaled_coef[0])}',
               100, 30000, x_offset=30, y_offset=-30, ha='left', va='bottom')
plt.xlim([60, 130])
plt.ylim([10000, 50000])
plt.show()

# How many braking events occur per race as a function of target speed.
plt.plot(SPEED_AXIS, num_decelerations)
plt.xlabel('Target Speed [kph]')
plt.ylabel('Number of Decelerations')
plt.title('Number of Decelerations vs. Target Speed')
# NOTE(review): sibling plots use add_footnote for this same caption; figtext
# is called directly here -- consider unifying.
plt.figtext(0.01, -0.03, f'*Given hot lap delay plus wasted time of {round(HOT_LAP_DELAY_TIME + WASTED_TIME, 2)} h.',
            horizontalalignment='left', verticalalignment='bottom', fontsize=8)
plt.xlim([70, 130])
plt.ylim([60, 110])
plt.show()

# plot the above but vs true race time (hours)
x_axis = target_speed_df['true_race_time'] / 3600
plt.plot(x_axis, target_speed_df['squared_speed_sums'])
plt.xlim([25, 50])
plt.xlabel('True Race Time [h]')
plt.ylabel('Sum of Squared Speeds [(m/s)^2]')
plt.title('Sum of Squared Speeds vs. Race Time')
add_footnote(f'Given hot lap delay plus wasted time of {round(HOT_LAP_DELAY_TIME + WASTED_TIME, 2)} h.')
# fit a polynomial only to race times between 27 h and 40 h
target_speed_df_fit = target_speed_df[(target_speed_df['true_race_time'] / 3600 > 27) & (target_speed_df['true_race_time'] / 3600 < 40)]
# polyfit with numpy; np.polyfit returns coefficients highest power first, so
# p[0] is the quadratic term used in the annotation below.
p = np.polyfit(target_speed_df_fit['true_race_time'] / 3600, target_speed_df_fit['squared_speed_sums'], 2)
plt.plot(target_speed_df_fit['true_race_time'] / 3600, np.polyval(p, target_speed_df_fit['true_race_time'] / 3600), '--', color=MICHIGAN_MAIZE)
add_annotation(f'{round(p[0])}t^2 + {round(p[1])}t + {round(p[2])}',
               34, np.polyval(p,34), x_offset=30, y_offset=30, ha='left', va='bottom')
plt.show()

We also want to calculate the RMS velocity given a target speed:

Code
def get_rms_velocity(route, target_speed_kph, acceleration_mpsps=1, deceleration_mpsps=0.5, acceptable_speeding_kph=0, hot_lap_delay=0, wasted_time=0):
    """Distance-weighted RMS speed (m/s) of the profile for a target speed.

    Each profile point's squared speed is weighted by the distance to the
    next point. The final point gets a NaN step, which pandas sums ignore.
    NOTE: `wasted_time` is accepted but unused, matching the sibling helpers.
    """
    profile, _, _ = generate_speed_profile(
        route, target_speed_kph, acceleration_mpsps, deceleration_mpsps,
        acceptable_speeding_kph, hot_lap_delay)
    # Distance from each point to the next (the weighting).
    profile['distance_step'] = profile['distance'].shift(-1) - profile['distance']
    profile['squared_velocity'] = profile['speed'] ** 2
    weighted_square_sum = (profile['squared_velocity'] * profile['distance_step']).sum()
    return np.sqrt(weighted_square_sum / profile['distance_step'].sum())

# plot rms velocity vs target speed
# NOTE(review): WASTED_TIME is forwarded but get_rms_velocity never uses its
# wasted_time parameter -- confirm intent.
rms_velocities = np.array([get_rms_velocity(route, speed, 1.5, 0.5, 0, HOT_LAP_DELAY_TIME, WASTED_TIME) for speed in SPEED_AXIS])

# Internally speeds are m/s; convert to kph for plotting.
plt.plot(SPEED_AXIS, rms_velocities * 3.6)
plt.xlabel('Target Speed [kph]')
plt.ylabel('RMS Velocity [kph]')
plt.title('RMS Velocity vs. Target Speed')
add_footnote(f'Given hot lap delay plus wasted time of {round(HOT_LAP_DELAY_TIME + WASTED_TIME, 2)} h.')
plt.xlim([60, 130])
plt.ylim([60, 130])
plt.show()

# add rms velocities to df (still in m/s)
target_speed_df['rms_velocity'] = rms_velocities

Code
# fp, slp, eod_locations = generate_speed_profile(route, 105, 1, 0.5, 0)

# slp2 = slp.copy()
# slp2['distance'] = slp['distance'].shift(-1) - 0.001
# slp = pd.concat([slp, slp2])
# slp = slp.sort_values('distance')

# plt.plot(fp['distance'] / 1000, fp['speed'] * 3.6)
# plt.plot(slp['distance'] / 1000, slp['speed_limit'] * 3.6, alpha=0.5)
# plt.scatter([x / 1000 for x in eod_locations], [0] * len(eod_locations), color='red', label='Stops')
# # plot acceleration on other y axis
# plt.ylim([0, 140])
# ax2 = plt.gca().twinx()
# plt.show()

# def calculate_race_time(route, target_speed_kph, acceleration_mpsps=0.5, acceptable_speeding_kph=0.0):
#     target_speed = target_speed_kph / 3.6
#     acceptable_speeding = acceptable_speeding_kph / 3.6
#     a = acceleration_mpsps

#     dist_step = 100
#     num_steps = int(route['distance'].sum() / dist_step + 0.5)
#     distance_profile = np.linspace(0, route['distance'].sum(), num_steps)
#     speed_limit_profile = np.zeros(num_steps)
#     effective_speed_profile = np.zeros(num_steps)

#     # for each step go through, and calculate the effective speed limit
#     # this is the maximum speed that can be achieved at that point
#     v = 0
#     d = 0
#     for i in range(num_steps):
#         d = i * dist_step
#         if d < 10:
#             speed_limit = 40 / 3.6
#         else:
#             speed_limit = route['speed_limit'][route['distance'].cumsum() <= d].iloc[-1]
#         speed_limit_profile[i] = speed_limit
#         # if we're already going the right speed, don't accelerate
#         max_speed = min(target_speed, speed_limit + acceptable_speeding)
#         if abs(v - max_speed) < 0.01:
#             v = max_speed
#             effective_speed_profile[i] = max_speed
#             continue
#         # if we're going too slow, accelerate to the target speed
#         t = - v / a + np.sqrt(v ** 2 / a ** 2 + 2 * dist_step / a)
#         if v + a * t > max_speed:
#             # find t such that we reach this point
#             t_to_max_speed = (max_speed - v) / a
#             d_acc = v * t_to_max_speed + 0.5 * a * t_to_max_speed ** 2
#             t = t_to_max_speed + (dist_step - d_acc) / max_speed
#             v = max_speed
#         else:
#             v = v + a * t
#         effective_speed_profile[i] = v
#     # do same in reverse
#     v = speed_limit_profile[-1]
#     d = race_distance
#     for i in range(num_steps - 1, -1, -1):
#         speed_limit = speed_limit_profile[i]
#         d = race_distance - i * dist_step
#         # if we're already going the right speed, don't accelerate
#         max_speed = min(target_speed, speed_limit + acceptable_speeding)
#         # if segment end condition is 'control_stop' or 'driver_swap'
#         if route['segment_end_condition']
#             max_speed = 0
#         if v == max_speed:
#             effective_speed_profile[i] = min(effective_speed_profile[i], v)
#             continue
#         # if we're going too slow, accelerate to the target speed
#         t = - v / a + np.sqrt(v ** 2 / a ** 2 + 2 * dist_step / a)
#         if v + a * t > max_speed:
#             # find t such that we reach this point
#             t_to_max_speed = (max_speed - v) / a
#             d_acc = v * t_to_max_speed + 0.5 * a * t_to_max_speed ** 2
#             t = t_to_max_speed + (dist_step - d_acc) / max_speed
#             v = max_speed
#         else:
#             v = v + a * t
#         effective_speed_profile[i] = min(effective_speed_profile[i], v)

#     return distance_profile, effective_speed_profile, speed_limit_profile

# dp, esp, slp = calculate_race_time(race_route, 110, 0.5, 0.0)

# plt.plot(dp / 1000, slp)
# plt.plot(dp / 1000, esp)

    # time = 0
    # speed = 0
    # distance = 0
    # segment_index = 0
    # route['distance_traveled'] = route['distance'].cumsum()
    # # TODO: account for stop signs, stop lights, and flashing yellows
    # # TODO: account for slowdowns for control stops, driver swaps, etc.
    # while(distance < route['distance'].sum()):
    #     # get the current segment
    #     while(route.iloc[segment_index]['distance_traveled'] < distance):
    #         segment_index += 1
    #     segment = route.iloc[segment_index]
    #     segment_distance = segment['distance']
    #     segment_distance_traveled = segment['distance_traveled']

    #     limited_target_speed = min(target_speed, segment['speed_limit'] + acceptable_speeding)
    #     # TODO: also have to take into account the need to decelerate for the next segment
    
    #     # check if accelerating, decelerating, or cruising
    #     if speed == limited_target_speed:
    #         acceleration_time = 0
    #         acceleration_distance = 0
    #         time += (segment_distance_traveled - distance) / speed
    #         distance = segment_distance_traveled + 0.001
    #     elif speed < limited_target_speed:
    #         acceleration_time = (limited_target_speed - speed) / acceleration
    #         acceleration_distance = speed * acceleration_time + 0.5 * acceleration * acceleration_time ** 2
    #         time += acceleration_time
    #         distance += acceleration_distance
    #         speed = limited_target_speed
    #         # TODO: account for segment cutoffs
    #     else:
    #         acceleration_time = (speed - limited_target_speed) / acceleration
    #         acceleration_distance = speed * acceleration_time - 0.5 * acceleration * acceleration_time ** 2
    #         time += acceleration_time
    #         distance += acceleration_distance
    #         speed = limited_target_speed
    #         # TODO: account for segment cutoffs

    # return time

# print(calculate_race_time(race_route, 1000, 0.3, 0.0))

Weather Analysis

Drive Cycle Analysis

  • Possible Speed Profiles
  • True Race Time vs. Speed (given Acceleration) ==> Target Speed
  • True Race Time vs. Willingness to Speed @ Target Speed
  • Number of Accelerations
  • Number of Decelerations
  • Irradiance Map (vs. Total Driver Ingress Time)
  • Pointing Times
  • Wind Map, Yaw Distribution, Apparent Wind Speed Distribution
  • Temperature Map
  • Air Density Map
  • v^2 d Weighted Yaw Distribution
  • CdA and Array Power Level Curves

Stops: Start, BOD, EOD, Control Points, Lights, Signs, Flashing Yellows

Load Weather Data

First, we find the weather station for each segment of the route.

Code
# read in the weather station list (lat/lon plus a "group" id per station)
# TODO(agoettsc): solarset should be a submodule or something like that
weather_stations = pd.read_csv("./data/australia_stations.csv")

# determine the fractional weather station for each segment for interpolation
# (one distance column per station group; filled in by the loop below)
seg_station_distances = pd.DataFrame(columns=[f"dist_station_{i}" for i in weather_stations["group"]], index=route.index)

def haversine_distance(seg, station):
    """Great-circle distance in km from a segment's start point to a station.

    Uses the geocentric ellipsoid radius evaluated at the midpoint latitude,
    then the standard haversine formula on a sphere of that radius. Works
    element-wise, so `seg` may be a whole route DataFrame (returns a Series)
    or a single mapping of scalars.

    Args:
        seg: mapping/DataFrame with "start_latitude" and "start_longitude" (deg).
        station: mapping/Series with "lat" and "lon" (deg).

    Returns:
        Distance in kilometers (scalar or Series, matching `seg`).
    """
    seg_lat = np.deg2rad(seg["start_latitude"])
    seg_lon = np.deg2rad(seg["start_longitude"])
    station_lat = np.deg2rad(station["lat"])
    station_lon = np.deg2rad(station["lon"])

    # Geocentric radius of the ellipsoid at the midpoint latitude [m].
    avg_lat = (seg_lat + station_lat) / 2
    r_equatorial = 6378137
    r_polar = 6356752
    a = (r_equatorial ** 2) * np.cos(avg_lat)
    b = (r_polar ** 2) * np.sin(avg_lat)
    c = r_equatorial * np.cos(avg_lat)
    d = r_polar * np.sin(avg_lat)
    R = np.sqrt((a ** 2 + b ** 2) / (c ** 2 + d ** 2))

    # Haversine formula.
    haversine_output = np.sin((seg_lat - station_lat) / 2) ** 2 \
        + np.cos(seg_lat) * np.cos(station_lat) \
            * np.sin((seg_lon - station_lon) / 2) ** 2

    arc_length = 2 * np.arcsin(np.sqrt(haversine_output))

    # R is in meters; convert to km.
    return R * arc_length / 1000

# Fill one column per station: distance from every segment's start point to
# that station (haversine_distance is vectorized over the route).
for _, weather_station in weather_stations.iterrows():
    seg_station_distances[f"dist_station_{weather_station["group"]}"] = haversine_distance(route, weather_station)

def get_seg_weather_station(seg):
    """Return a fractional station index for a route segment by
    inverse-distance weighting its two nearest weather stations.

    NOTE(review): argpartition(2)[:2] selects the indices of the two smallest
    distances, and the +1 assumes station "group" ids are exactly 1..N in row
    order -- verify against australia_stations.csv.
    """
    seg_distances = seg_station_distances.iloc[seg.name]
    closest_stations = np.array([seg_distances[f"dist_station_{weather_station["group"]}"] for _, weather_station in weather_stations.iterrows()]).argpartition(2)[:2] + 1

    # Inverse-distance weights: nearer stations pull the index toward them.
    weights = np.array([1 / (seg_distances[f"dist_station_{group_idx}"]) for group_idx in closest_stations])
    station = np.sum(weights * closest_stations) / np.sum(weights)

    return station

# Fractional station index per segment; consumed by the weather interpolators.
route["weather_station"] = route.apply(get_seg_weather_station, axis=1)

Then, we generate routes annotated with the weather the vehicle experiences for a given speed profile.

Code
# Per-year October weather CSVs, plus an August median dataset
# (the median set is not used in this section).
WEATHER_FILES = glob.glob("data/weather/october/*.csv")
WEATHER_FILES_MEDIAN = glob.glob("data/weather/august-median/*.csv")

def parse_weather_file(weather_file_path):
    """Parse one weather CSV into time bounds and per-field interpolators.

    The CSV must contain every (weather_group, period_time_unix) combination
    so the values form a complete rectangular grid.

    Args:
        weather_file_path: path to a CSV with columns weather_group,
            period_time_unix, and the weather fields listed below.

    Returns:
        (min_unix_time, max_unix_time, interpolators) where interpolators maps
        each field name to a RegularGridInterpolator over
        (weather_group, period_time_unix).
    """
    weather = pd.read_csv(weather_file_path)
    weather = weather.sort_values(by=["weather_group", "period_time_unix"])
    weather_groups = weather["weather_group"].unique()
    weather_times = weather["period_time_unix"].unique()
    weather_xy = [
        weather_groups,
        weather_times,
    ]

    def _make_interpolator(column):
        # Rows are sorted by (group, time), so a C-order reshape lays the
        # values out on the (group, time) grid. Series.ravel() is deprecated
        # in pandas 2.x; go through a NumPy array instead.
        values = weather[column].to_numpy().reshape(len(weather_groups), len(weather_times), order="C")
        return RegularGridInterpolator(weather_xy, values)

    fields = [
        "dhi",
        "dni",
        "ghi",
        "wind_velocity_10m_ns",
        "wind_velocity_10m_ew",
        "air_temp_2m",
        "surface_pressure",
        "air_density",
    ]
    return (
        weather["period_time_unix"].min(),
        weather["period_time_unix"].max(),
        {field: _make_interpolator(field) for field in fields},
    )

# One (min_time, max_time, interpolators) tuple per historical weather year.
weathers = [parse_weather_file(f) for f in WEATHER_FILES]

Then, we use a schedule and a speed profile to find the time that we are at each segment. Using that time, we look up weather for each segment.

Code
SCHEDULE_FILES = glob.glob("data/schedule/october/*.toml")
# Height of the aerobody center of pressure above ground [m].
AEROBODY_COP_Z = 0.558
# Surface roughness length [m] for the logarithmic wind profile.
SURFACE_ROUGHNESS = 1e-3
# Weather wind speeds are reported at 10 m above ground.
WIND_REFERENCE_HEIGHT = 10
# Log-law shear ratio scaling the 10 m wind down to the aerobody CoP height.
wind_speed_multiplier = np.log(AEROBODY_COP_Z / SURFACE_ROUGHNESS) / np.log(WIND_REFERENCE_HEIGHT / SURFACE_ROUGHNESS)

def parse_schedule_file(schedule_file_path):
    """Load a TOML race schedule into lists of day start/end Timestamps.

    The file must contain a [[schedule]] array with race_start and race_end
    entries for each race day.
    """
    with open(schedule_file_path, "rb") as f:
        day_schedules = tomli.load(f)["schedule"]
    return {
        "day_starts": [pd.Timestamp(day["race_start"]) for day in day_schedules],
        "day_ends": [pd.Timestamp(day["race_end"]) for day in day_schedules],
    }

def get_time_for_seg_or_nan(seg, profile, schedule, race_day_seconds=9 * 3600):
    """Resolve the local wall-clock time at which a route segment is reached.

    Looks up the profile row whose distance exactly matches the segment's
    cumulative distance. Profile times are continuous driving seconds, so
    they are split into race days of `race_day_seconds` (default 9 h) and
    offset from that day's scheduled start.

    Args:
        seg: mapping with "cum_distance".
        profile: DataFrame with "distance" and "time" (seconds) columns.
        schedule: dict with a "day_starts" list of pd.Timestamp.
        race_day_seconds: driving seconds per race day.

    Returns:
        pd.Timestamp for an exact match, else float NaN.
    """
    matching_profile_segs = profile[seg["cum_distance"] == profile["distance"]]
    if len(matching_profile_segs) == 0:
        return float('nan')

    profile_seg_time = matching_profile_segs.iloc[0]["time"]
    profile_seg_day = int(np.floor(profile_seg_time / race_day_seconds))
    within_day = profile_seg_time - race_day_seconds * profile_seg_day
    return schedule["day_starts"][profile_seg_day] + pd.Timedelta(seconds=within_day)

def apply_weather_to_speed_profile(target_speed_kph, weather, schedule):
    """Annotate the route with the weather the vehicle experiences.

    Drives each non-marshaling segment at min(speed limit, target speed),
    resolves the local wall-clock time at each segment (including 30 min
    control stops and end-of-day rollovers), samples the weather
    interpolators at (station, time), and derives solar position and
    apparent-wind quantities.

    Args:
        target_speed_kph: target speed in kph.
        weather: dict of field -> (station, unix time) interpolator, as
            produced by parse_weather_file.
        schedule: dict with "day_starts"/"day_ends" Timestamp lists, as
            produced by parse_schedule_file.

    Returns:
        A copy of the global `route` with speed, localtime, weather, solar
        position, and wind columns added.
    """
    res = route.copy()
    res = res[res["segment_type"] != "marshaling"]

    # first, we need to resolve the time at each segment
    # we assign speed based on the segment of the profile and time based on if we used that exact segment in our profile
    res["cum_distance"] = res["distance"].cumsum().shift(1)
    res.loc[0, "cum_distance"] = 0
    # TODO(agoettsc): this is problematic, need to deal with accel
    # res["speed"] = res.apply(lambda seg: profile[profile["distance"] >= seg["cum_distance"]].iloc[0]["speed"], axis=1)
    # res.loc[0, "speed"] = profile.loc[1, "speed"]
    # res["localtime"] = res.apply(lambda seg: get_time_for_seg_or_nan(seg, profile, schedule), axis=1)
    # res["time_taken"] = res["distance"] / res["speed"]

    # Segment speed [m/s]: speed-limited target speed, no accel modeling yet.
    res["speed"] = np.minimum(res["speed_limit"], target_speed_kph / 3.6)
    res["time_taken"] = res["distance"] / res["speed"]
    res.loc[0, "localtime"] = schedule["day_starts"][0] + pd.Timedelta(hours=HOT_LAP_DELAY_TIME)

    # offset times based on previous segments to fill gaps
    for i, seg in res.iterrows():
        if i == 0:
            continue
        prev_t = res.loc[i - 1, "localtime"]
        next_t = prev_t + pd.Timedelta(seconds=res.loc[i - 1, "time_taken"])
        # Mandatory 30 minute control stop.
        if res.loc[i - 1, "segment_end_condition"] == "control_stop":
            next_t += pd.Timedelta(minutes=30)
        # If the segment straddles an end of day, the remainder carries over
        # to the next day's start.
        for j, day_end in enumerate(schedule["day_ends"]):
            if prev_t < day_end and next_t > day_end:
                next_t = schedule["day_starts"][j + 1] + (next_t - day_end)
        res.loc[i, "localtime"] = next_t

    # Sample each weather field at (fractional station index, unix time).
    for k, spline in weather.items():
        res[k] = res.apply(lambda seg: spline([seg["weather_station"], seg["localtime"].timestamp()])[0], axis=1)

    # now that we have time, we get solar position
    solpos_df = pvlib.solarposition.get_solarposition(res["localtime"], res["start_latitude"], res["start_longitude"], altitude=res["elevation"])
    res["solar_zenith"] = np.deg2rad(solpos_df["apparent_zenith"].to_numpy())
    res["solar_azimuth"] = -np.deg2rad(solpos_df["azimuth"].to_numpy())
    res["vehicle_relative_azimuth"] = (res["heading"] + res["solar_azimuth"]) % (2 * np.pi)

    # now we resolve the wind vector (scaled from 10 m down to CoP height)
    res["wind_speed"] = wind_speed_multiplier * np.sqrt(res["wind_velocity_10m_ns"] ** 2 + res["wind_velocity_10m_ew"] ** 2)
    # heading coordinate system is north at 0 degrees, cw positive, so east at 90 degrees
    # so, to correctly resolve an angle, east-west wind is the y value and north-south wind is the x value
    # offset by 180 because wind speed is positive when wind is from west/south
    res["wind_heading"] = np.atan2(-res["wind_velocity_10m_ew"], -res["wind_velocity_10m_ns"])

    # now, we have two polar vectors that we can add to get the world relative wind experienced by the vehicle
    r1 = res["speed"]
    r2 = res["wind_speed"]
    theta1 = res["heading"]
    theta2 = res["wind_heading"]
    # Law of cosines for the vector sum: |v1 + v2|^2 = r1^2 + r2^2
    # + 2*r1*r2*cos(dtheta). (Fixed: the r2 factor in the cross term was
    # previously missing, understating apparent wind whenever r2 != 1.)
    res["apparent_wind_speed"] = np.sqrt(r1 ** 2 + r2 ** 2 + 2 * r1 * r2 * np.cos(theta2 - theta1))

    # this would be theta_1 + atan2(...) but we want vehicle relative yaw, so theta_1 = heading will cancel out
    res["yaw"] = np.atan2(r2 * np.sin(theta2 - theta1), r1 + r2 * np.cos(theta2 - theta1))

    return res

def get_races_for_speed(speed):
    """Simulate the race at `speed` (kph) under every available schedule.

    Each schedule is paired with the weather dataset whose time span covers
    its first race day. Returns a dict keyed by the schedule's year/name
    (extracted from the Schedule<name>.toml filename) mapping to a
    weather-annotated route DataFrame.
    """
    schedules = {}
    for schedule_path in SCHEDULE_FILES:
        schedule_key = re.match(r".*/Schedule(.*)\.toml", schedule_path)[1]
        schedules[schedule_key] = parse_schedule_file(schedule_path)

    races = {}
    for schedule_key, schedule in schedules.items():
        race_start_unix = pd.Timestamp.timestamp(schedule["day_starts"][0])
        # First weather dataset whose [min_t, max_t] span contains the start.
        weather = next(w for min_t, max_t, w in weathers if min_t < race_start_unix < max_t)
        races[schedule_key] = apply_weather_to_speed_profile(speed, weather, schedule)

    return races

# Simulate the race at the chosen target speed under every schedule year.
races = get_races_for_speed(target_speed)
race_2022 = races["2022"]

Now that we have segment speeds, we plot speed and position over time.

Code
# Vehicle speed over local wall-clock time for the 2022 schedule.
fig, ax = plt.subplots()

ax.plot(race_2022["localtime"], race_2022["speed"] * 3.6)  # m/s -> kph
ax.set_title("Speed over time")
ax.set_ylabel("Speed (kph)")
ax.set_xlabel("Time")

fit_to_gslide()

Code
# Cumulative distance over local time for the 2022 schedule.
fig, ax = plt.subplots()

# Fixed: this previously plotted the fractional weather-station index, which
# did not match the title/labels (or the prose, which promises position).
ax.plot(race_2022["localtime"], race_2022["cum_distance"] / 1000)  # m -> km
ax.set_title("Cumulative distance over time")
ax.set_ylabel("Distance (km)")
ax.set_xlabel("Time")

fit_to_gslide()

Solar Position and Irradiance

First, we plot DNI, DHI, and GHI over time.

Code
fig, ax = plt.subplots()

# Direct, diffuse, and global horizontal irradiance along the simulated race.
ax.plot(race_2022["localtime"], race_2022["dni"], label="dni")
ax.plot(race_2022["localtime"], race_2022["dhi"], label="dhi")
ax.plot(race_2022["localtime"], race_2022["ghi"], label="ghi")
# Title fixed: this section loads October weather and schedules (see
# WEATHER_FILES / SCHEDULE_FILES), not August.
ax.set_title("Irradiance during 4 days in October 2022")
ax.set_ylabel("Irradiance ($W/m^2$)")
ax.set_xlabel("Time")

fig.legend()
fit_to_gslide()

Then, we plot the position of the sun relative to the vehicle over a 4 day race.

Code
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.scatter(race_2022["vehicle_relative_azimuth"], race_2022["solar_zenith"] * 180 / np.pi)
ax.set_theta_zero_location('N')
ax.set_rmax(90)
ax.set_rticks([0, 30, 60, 90], labels=['0', '30', '60', '90'])
ax.set_title("Vehicle relative solar position (October 2022)")

fit_to_gslide()

As a KDE plot:

Code
# Same plot as a KDE, weighting each sample by DNI * time so the density
# reflects collectable beam energy rather than raw sample counts.
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
kde = seaborn.kdeplot(x=race_2022["vehicle_relative_azimuth"], y=race_2022["solar_zenith"] * 180 / np.pi, weights=race_2022["dni"] * race_2022["time_taken"], fill=True, cbar=True)
# fig.colorbar(kde)
ax.set_theta_zero_location('N')
ax.set_rmax(90)
ax.set_rticks([0, 30, 60, 90], labels=['0', '30', '60', '90'])
ax.set_title("Vehicle relative solar position (October 2022 w/ 2007-2022 weather)")
ax.set_xlabel("")
ax.set_ylabel("")

fit_to_gslide()

We also care about how often the sun is forward of horizontal:

Code
# Share of beam (DNI) energy arriving while the sun is forward of the
# vehicle's transverse plane (relative azimuth within +/-90 deg of the nose).
# NOTE(review): variable names have a doubled "of" -- left as-is since later
# cells may reference them.
sun_forwards_of_of_horizontal_Jpersqm = (((race_2022["vehicle_relative_azimuth"] < (np.pi / 2)) | (race_2022["vehicle_relative_azimuth"] > (3 * np.pi / 2))) * race_2022["dni"] * race_2022["time_taken"]).sum()
sun_forwards_of_of_horizontal_Jpersqm_percent = (sun_forwards_of_of_horizontal_Jpersqm / (race_2022["dni"] * race_2022["time_taken"]).sum()) * 100

print(f"{sun_forwards_of_of_horizontal_Jpersqm_percent:.2f}% of irradiance is collected forwards of horizontal")
31.18% of irradiance is collected forwards of horizontal

We would like to determine the optimal orientation of solar cells. First, we generate 2000 sample normal vectors. Then, we calculate the irradiance on them at each route step and find specific power collected in \(W/m^2\) as a function of orientation.

Code
# Number of candidate cell-normal directions sampled over the hemisphere.
SAMPLE_CELL_VECTORS = 2000
# Resolution of the (azimuth, zenith) heatmap sampling grid.
HEATMAP_RES_AZ = 1000
HEATMAP_RES_ZEN = 500
# Orientation bounds: full azimuth circle, zenith from straight up to horizon.
AZIMUTH_BOUNDS = (0, np.pi * 2)
ZENITH_BOUNDS = (0, np.pi / 2)

def get_cell_normal_spline(race_profile):
    """Estimate time-averaged irradiance collected per cell orientation.

    Samples SAMPLE_CELL_VECTORS normal vectors over the upper hemisphere,
    computes beam + sky-diffuse + ground-reflected irradiance on each at
    every race-profile row, and time-averages the results.

    Returns:
        Four LinearNDInterpolator objects over the unit-normal (x, y, z) --
        total, beam, sky-diffuse, and ground-reflected average irradiance --
        plus the best-performing sampled orientation (a pandas Series).
    """
    race_profile = race_profile.copy()
    def make_hemisphere_normals(num_points):
        # One straight-up normal, a ring of horizon normals, and a Fibonacci
        # spiral filling the rest of the hemisphere roughly uniformly.
        point_normals = [(0, 0)]

        num_equator_points = 2 * math.ceil(math.sqrt(num_points))
        point_normals += [(2 * math.pi * i / num_equator_points, math.pi / 2) for i in range(num_equator_points)]

        num_fibonacci_points = num_points - num_equator_points - 1
        phi = math.pi * (math.sqrt(5) - 1)  # golden angle
        for i in range(num_fibonacci_points):
            z = 1 - (i / num_fibonacci_points)
            radius = math.sqrt(1 - z * z)
            theta = phi * i
            x = math.cos(theta) * radius
            y = math.sin(theta) * radius
            point_normals.append((math.atan2(y, x) % (2 * math.pi), math.acos(z)))

        df = pd.DataFrame(point_normals, columns=["static_cell_azimuth", "static_cell_zenith"])
        # scale up rho for the cell normal vectors to produce a convex hull for interpolation slightly larger than the unit
        # sphere so that all points on the unit sphere are inside the convex hull, even when the facets of it are triangular
        RHO_EPSILON = 0.001
        RHO = 1 + RHO_EPSILON
        df["x"] = RHO * np.sin(df["static_cell_zenith"]) * np.cos(df["static_cell_azimuth"])
        df["y"] = RHO * np.sin(df["static_cell_zenith"]) * np.sin(df["static_cell_azimuth"])
        df["z"] = RHO * np.cos(df["static_cell_zenith"])

        return df

    cell_normals = make_hemisphere_normals(SAMPLE_CELL_VECTORS)

    def get_cell_cos_incident_angle(cell):
        # Spherical law of cosines between the sun direction and the cell
        # normal; clamp negative values (sun behind the cell) to zero.
        cosine_incident_angle = np.cos(race_profile["solar_zenith"]) * np.cos(cell_normals.loc[cell, "static_cell_zenith"]) + \
                np.sin(race_profile["solar_zenith"]) * np.sin(cell_normals.loc[cell, "static_cell_zenith"]) * \
                    (np.cos(race_profile["vehicle_relative_azimuth"]) * np.cos(cell_normals.loc[cell, "static_cell_azimuth"])
                    + np.sin(race_profile["vehicle_relative_azimuth"]) * np.sin(cell_normals.loc[cell, "static_cell_azimuth"]))
        cosine_incident_angle[cosine_incident_angle < 0] = 0
        return cosine_incident_angle

    # Encapsulant transmission vs. incidence angle; rows are reversed,
    # presumably so cos_incident_angle is ascending as CubicSpline requires.
    encapsulant_eff_raw = pd.read_csv("data/encapsulant/mito-skyiar-2025.csv").iloc[::-1]
    encapsulant_eff_interp = CubicSpline(encapsulant_eff_raw["cos_incident_angle"].to_numpy(), encapsulant_eff_raw["encapsulant_efficiency"])

    def get_cell_beam_irradiance_seen(cell):
        # Direct (beam) irradiance after cosine loss and encapsulant losses.
        cosine_incident_angle = get_cell_cos_incident_angle(cell)
        return cosine_incident_angle * race_profile["dni"] * encapsulant_eff_interp(cosine_incident_angle)
    
    def get_sky_diffuse_irradiance(cell):
        # Isotropic sky term plus a circumsolar/horizon correction term.
        # NOTE(review): the correction coefficients (0.012, -0.04) are not
        # derivable from this file -- confirm their source model.
        isotropic_diffuse_irradiance = race_profile["dhi"] * (1 + np.cos(cell_normals.loc[cell, "static_cell_zenith"])) / 2
        circumsolar_horizon_diffuse_irradiance = race_profile["ghi"] * (0.012 * np.rad2deg(race_profile["solar_zenith"]) - 0.04) * (1 - np.cos(cell_normals.loc[cell, "static_cell_zenith"])) / 2
        return isotropic_diffuse_irradiance + circumsolar_horizon_diffuse_irradiance

    def get_ground_reflected_irradiance(cell):
        # GHI reflected off the ground with a fixed 0.1 albedo.
        return race_profile["ghi"] * (1 - np.cos(cell_normals.loc[cell, "static_cell_zenith"])) / 2 * 0.1

    # Per-cell irradiance columns plus time-weighted averages per orientation.
    for cell_id in cell_normals.index:
        race_profile[f"cell-{cell_id}-beam-irradiance-collected"] = get_cell_beam_irradiance_seen(cell_id)
        race_profile[f"cell-{cell_id}-sky-diffuse-irradiance-collected"] = get_sky_diffuse_irradiance(cell_id)
        race_profile[f"cell-{cell_id}-ground-reflected-irradiance-collected"] = get_ground_reflected_irradiance(cell_id)
        race_profile[f"cell-{cell_id}-irradiance-collected"] = race_profile[f"cell-{cell_id}-beam-irradiance-collected"] + race_profile[f"cell-{cell_id}-sky-diffuse-irradiance-collected"] + race_profile[f"cell-{cell_id}-ground-reflected-irradiance-collected"]
        cell_normals.loc[cell_id, "avg_cell_beam_irradiance"] = np.average(race_profile[f"cell-{cell_id}-beam-irradiance-collected"], weights=race_profile["time_taken"])
        cell_normals.loc[cell_id, "avg_sky_diffuse_irradiance"] = np.average(race_profile[f"cell-{cell_id}-sky-diffuse-irradiance-collected"], weights=race_profile["time_taken"])
        cell_normals.loc[cell_id, "avg_ground_reflected_irradiance"] = np.average(race_profile[f"cell-{cell_id}-ground-reflected-irradiance-collected"], weights=race_profile["time_taken"])
        cell_normals.loc[cell_id, "avg_irradiance_collected"] = np.average(race_profile[f"cell-{cell_id}-irradiance-collected"], weights=race_profile["time_taken"])

    # NOTE(review): these two grids are unused inside this function (they are
    # recomputed at module level after the call) -- likely dead code.
    zenith_linspace = np.linspace(*ZENITH_BOUNDS, HEATMAP_RES_ZEN)
    sample_points_azimuth, sample_points_zenith = np.meshgrid(np.linspace(*AZIMUTH_BOUNDS, HEATMAP_RES_AZ), zenith_linspace)

    total_time_s = np.sum(race_profile["time_taken"])
    print(f"Total racetime is {total_time_s / 3600} hrs")

    # NOTE(review): idxmax returns a label but iloc takes a position; this is
    # only correct while cell_normals keeps its default RangeIndex.
    best_cell = cell_normals.iloc[cell_normals["avg_irradiance_collected"].idxmax()]
    best_cell_over_6m2 = best_cell['avg_irradiance_collected'] * 6
    print(f"Best orientation collects {best_cell['avg_irradiance_collected']:.2f} W/m^2 ({best_cell_over_6m2:.2f} W if this was the entire array), azimuth: {np.rad2deg(best_cell["static_cell_azimuth"]):.2f} deg, zenith: {np.rad2deg(best_cell["static_cell_zenith"]):.2f} deg")

    # Row 0 is the straight-up normal created first in make_hemisphere_normals.
    flat_cell = cell_normals.iloc[0]
    flat_cell_over_6m2 = flat_cell['avg_irradiance_collected'] * 6
    print(f"Flat cell collects {flat_cell['avg_irradiance_collected']:.2f} W/m^2 ({flat_cell_over_6m2:.2f} W if this was the entire array), azimuth: {np.rad2deg(flat_cell["static_cell_azimuth"]):.2f} deg zenith: {np.rad2deg(flat_cell["static_cell_zenith"]):.2f} deg")

    return (
        LinearNDInterpolator(cell_normals[["x", "y", "z"]], cell_normals["avg_irradiance_collected"]), 
        LinearNDInterpolator(cell_normals[["x", "y", "z"]], cell_normals["avg_cell_beam_irradiance"]), 
        LinearNDInterpolator(cell_normals[["x", "y", "z"]], cell_normals["avg_sky_diffuse_irradiance"]), 
        LinearNDInterpolator(cell_normals[["x", "y", "z"]], cell_normals["avg_ground_reflected_irradiance"]), 
        best_cell,
    )

# Build the orientation->irradiance interpolators over all weather years.
specific_irradiance_spline, specific_beam_irradiance_spline, specific_sky_diffuse_spline, specific_ground_reflected_spline, best_cell = get_cell_normal_spline(pd.concat(races.values()))

# Sample the hemisphere of orientations on an (azimuth, zenith) grid.
zenith_linspace = np.linspace(*ZENITH_BOUNDS, HEATMAP_RES_ZEN)
sample_points_azimuth, sample_points_zenith = np.meshgrid(np.linspace(*AZIMUTH_BOUNDS, HEATMAP_RES_AZ), zenith_linspace)

# Spherical -> Cartesian unit vectors for the interpolator lookup.
sample_points_x = np.sin(sample_points_zenith) * np.cos(sample_points_azimuth)
sample_points_y = np.sin(sample_points_zenith) * np.sin(sample_points_azimuth)
sample_points_z = np.cos(sample_points_zenith)
irradiance_collected_sampled = specific_irradiance_spline(sample_points_x, sample_points_y, sample_points_z)

# Polar heatmap: angle = cell azimuth, radius = cell zenith in degrees.
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
pc = ax.pcolormesh(sample_points_azimuth, sample_points_zenith * 180 / np.pi, irradiance_collected_sampled, shading='auto')
fig.colorbar(pc)
# ax.scatter(best_cell["static_cell_azimuth"], best_cell["static_cell_zenith"] * 180 / np.pi, label="best attitude")
ax.set_theta_zero_location('N')
ax.set_rmax(90)
# ax.set_rticks([0, 30, 60, 90], labels=['0', '30', '60', '90'])
ax.set_title(r"Average cell irradiance ($W/m^2$) collected at attitude (October 2022 w/ 2007-2022 weather)")
ax.scatter(best_cell["static_cell_azimuth"], best_cell["static_cell_zenith"] * 180 / np.pi, label="best attitude")

fig.legend(loc="lower left")

# fit_to_gslide()
Total racetime is 1508.128394329492 hrs
Best orientation collects 694.74 W/m^2 (4168.42 W if this was the entire array), azimuth: 174.70 deg, zenith: 11.45 deg
Flat cell collects 687.50 W/m^2 (4125.02 W if this was the entire array), azimuth: 0.00 deg zenith: 0.00 deg

We also examine the cross-section of this plot corresponding to a cell tilted fore and aft about the vehicle's X axis — that is, varying the cell's pitch while holding its azimuth fixed.

Code
SECTION_AZIMUTH_DEG = 180
SECTION_AZIMUTH = np.deg2rad(SECTION_AZIMUTH_DEG)
# Sample a full fore/aft arc: negative zenith tilts the opposite direction.
section_zenith_linspace = np.linspace(-ZENITH_BOUNDS[1], ZENITH_BOUNDS[1], HEATMAP_RES_ZEN * 2)
# Spherical -> Cartesian for the sample arc. Fixed: the y component must also
# scale with sin(zenith) so the samples stay on the unit sphere. (At
# SECTION_AZIMUTH = 180 deg this is numerically identical to the old form
# since sin(pi) ~ 1e-16, but the old form was wrong for any other azimuth.)
section_x_samples = np.sin(section_zenith_linspace) * np.cos(SECTION_AZIMUTH)
section_y_samples = np.sin(section_zenith_linspace) * np.sin(SECTION_AZIMUTH)
section_z_samples = np.cos(section_zenith_linspace)
section_zenith_deg_plot_x = np.rad2deg(section_zenith_linspace)
section_irradiance = specific_irradiance_spline(section_x_samples, section_y_samples, section_z_samples)
section_max_index = np.argmax(section_irradiance)
section_max_irradiance = section_irradiance[section_max_index]
section_max_irradiance_over_6m2 = section_max_irradiance * 6
print(f"Best cell along azimuth {SECTION_AZIMUTH_DEG} deg collects {section_irradiance[section_max_index]:.2f} W/m^2 ({section_max_irradiance_over_6m2:.2f} W if this was the entire array), zenith: {section_zenith_deg_plot_x[section_max_index]:.2f} deg")

fig = plt.figure()
section_ax = fig.add_subplot()
section_ax.plot(section_zenith_deg_plot_x, section_irradiance, label="total irradiance collected")
section_ax.plot(section_zenith_deg_plot_x, specific_beam_irradiance_spline(section_x_samples, section_y_samples, section_z_samples), label="beam irradiance collected")
section_ax.plot(section_zenith_deg_plot_x, specific_sky_diffuse_spline(section_x_samples, section_y_samples, section_z_samples), label="sky diffuse irradiance collected")
section_ax.plot(section_zenith_deg_plot_x, specific_ground_reflected_spline(section_x_samples, section_y_samples, section_z_samples), label="ground reflected irradiance collected")
# section_ax.legend()
section_ax.set_title(f"Irradiance collected by cells oriented along the x-axis of the vehicle")
section_ax.set_xlabel("Pitch (deg)")
section_ax.set_ylabel(r"Irradiance ($W/m^2$)")
# Draw a zero-pitch reference line without disturbing the autoscaled y-limits.
ylim = section_ax.get_ylim()
section_ax.plot([0, 0], [-1000, 1000], '--', color=ANGELL_HALL_ASH)
section_ax.set_ylim(ylim)

fit_to_gslide()
Best cell along azimuth 180 deg collects 694.57 W/m^2 (4167.44 W if this was the entire array), zenith: 10.18 deg

Wind Analysis

First, we plot a histogram of yaw frequencies.

Code
races_df = pd.concat(races.values())

# Weight each sample by apparent_wind_speed^2 * distance, an aero-load-
# relevant weighting; normalize so the histogram sums to 1.
races_df["yaw_weight"] = (races_df["apparent_wind_speed"] ** 2) * races_df["distance"]
rms_apparent_wind_speed = np.sqrt(np.sum(races_df["yaw_weight"]) / np.sum(races_df["distance"]))
weight_denom = np.sum(races_df["yaw_weight"])

counts, bins = np.histogram(np.rad2deg(races_df["yaw"]), weights=races_df["yaw_weight"] / weight_denom, bins=401, range=(-20.05, 20.05))
fig = plt.figure()
ax = fig.add_subplot()
# Bins are 0.1 deg wide; size the bars to the bins (the default 0.8-wide
# bars would overlap ~8 neighboring bins and misrepresent the histogram).
ax.bar(bins[:-1], counts, width=np.diff(bins), align="edge")
ax.set_title("Apparent yaw frequency over race days in October (2007-2022)")
ax.set_xlabel("Yaw angle (deg)")
ax.set_ylabel("Frequency")
Text(0, 0.5, 'Frequency')

Then, we compute the RMS apparent wind speed at different target speeds.

Code
# Target-speed sweep bounds [kph] and sample count for the RMS wind study.
RMS_WIND_SPEED_SWEEP_RANGE_KPH = (80, 130)
RMS_WIND_SAMPLES = 10
def get_rms_apparent_wind_speed(speed): 
    races_at_speed = get_races_for_speed(speed)
    races_df = pd.concat(races_at_speed.values())
    races_df["yaw_weight"] = (races_df["apparent_wind_speed"] ** 2) * races_df["distance"]
    return np.sqrt(np.sum(races_df["yaw_weight"]) / np.sum(races_df["distance"]))

target_speeds_kph = np.linspace(*RMS_WIND_SPEED_SWEEP_RANGE_KPH, RMS_WIND_SAMPLES)
rms_apparent_wind_speeds = np.fromiter((get_rms_apparent_wind_speed(s) for s in target_speeds_kph), float, count=-1)

fig, ax = plt.subplots()
ax.plot(target_speeds_kph, rms_apparent_wind_speeds * 3.6)
ax.set_title("RMS apparent wind speed vs. target speed")
ax.set_xlabel("Target speed [kph]")
ax.set_ylabel("RMS apparent wind speed [kph]")
ax.set_xlim([80, 130])
ax.set_ylim([80, 130])
fit_to_gslide()

Target Race Time Analysis

Concept Analysis

Code
# CdA on x-axis, using tex
# Array Energy In on y-axis
# Level curves at different racetimes [4d - 1h, 4d, 4d + 1h, 4d + 2h, etc]

def get_array_cda_from_trt(official_race_time, cda_axis):
    """Return the array power [W] needed to balance the energy budget at
    each CdA value in cda_axis (in m^2/1000) for the given official race
    time (seconds)."""
    true_race_time = official_race_time - (HOT_LAP_DELAY_TIME + WASTED_TIME) * 3600
    # for a balanced energy budget
    OTHER_ENERGY_IN = 2942 + 3672 + 9653
    OTHER_ENERGY_OUT = 30805 - 18691
    NOMINAL_MPPT_EFFICIENCY = 0.98
    NOMINAL_AIR_DENSITY = 1.159

    # The RMS velocity for this race time does not depend on CdA, so build
    # and evaluate the interpolant once (hoisted out of the loop below).
    f = interp1d(target_speed_df['true_race_time'], target_speed_df['rms_velocity'])
    rms_velocity = f(true_race_time)
    # +3 on top of ground speed — presumably a nominal headwind in m/s;
    # TODO confirm against the wind analysis above.
    rms_wind_velocity = rms_velocity + 3

    # For each fixed CdA, find the array power that balances the budget.
    array_power_axis = []
    for cda in cda_axis:
        # Aerodynamic drag energy over the race distance at this CdA.
        drag_energy = 0.5 * NOMINAL_AIR_DENSITY * cda / 1000 * rms_wind_velocity ** 2 * race_distance / 3600
        needed_array_energy = drag_energy + OTHER_ENERGY_OUT - OTHER_ENERGY_IN
        # Array power that supplies this energy over the collection time.
        # (The debug print of (cda, drag_energy) that flooded the rendered
        # report has been removed.)
        array_power = needed_array_energy / ((true_race_time + WASTED_TIME) / 3600) / NOMINAL_MPPT_EFFICIENCY
        array_power_axis.append(array_power)
    return array_power_axis
# Concept-selection chart: level curves of required array power vs. CdA,
# one curve per candidate race time, overlaid with candidate vehicles.
fig = plt.figure()

plt.xlabel(r'$C_d A$ ($m^2$/1000)')
plt.xlim([30, 100])

plt.ylabel('Array Energy In [W]')
# plt.ylim([24, 40])

plt.title('Concept Selection Status')

# plot level curves
race_times = ['4d', '4d + 3h', '4d + 6h', '5d']
official_race_times = 1/3 + np.array([31, 34, 37, 40])
cda_axis = np.linspace(30, 100, 71)

for race_label, official_hours in zip(race_times, official_race_times):
    array_power_axis = get_array_cda_from_trt(official_hours * 3600, cda_axis)
    curve = plt.plot(cda_axis, array_power_axis, color=MICHIGAN_BLUE, alpha=0.75)
    # Only the nominal 31h20m ('4d') curve stays solid; others are dashed.
    if official_hours != 31 + 1/3:
        curve[0].set_linestyle('--')
    # Annotate each curve at its right end.
    plt.text(cda_axis[-1], array_power_axis[-1], ' ' + race_label, color=MICHIGAN_BLUE, va='center', ha='left')


def plot_concept_car(cda, array_power, color, label=None):
    """Scatter one concept car; unlabeled duplicates render semi-transparent
    and are excluded from the legend."""
    plt.scatter([cda], [array_power], color=color, label=label, s=70, edgecolors='black', linewidths=0.5, alpha=1 if label is not None else 0.5)


plot_concept_car(70.93, 858.40, color=MICHIGAN_MAIZE, label='Monohull')
plot_concept_car(73.45, 870.50, color=MICHIGAN_MAIZE) # Monohull
plot_concept_car(90.90, 928.07, color=LAW_QUAD_STONE, label='Teardrop')

SIZE=1
# Scaled reference points from previous-generation cars, drawn as squares.
plt.scatter([71], [640], color=ANGELL_HALL_ASH, label='Astrum (Scaled)', s=SIZE, edgecolors='black', linewidths=0.5, marker='s')
plt.scatter([45], [600], color=TAUBMAN_TEAL, label='Infinite (Scaled)', s=SIZE, edgecolors='black', linewidths=0.5, marker='s')

# display legend only for labeled items
plt.legend()

fit_to_gslide()

# set dpi to 600
plt.savefig('concept_selection.png', dpi=600)

plt.show()
30.0 13527.500850442771
31.0 13978.41754545753
32.0 14429.33424047229
33.0 14880.250935487049
34.0 15331.167630501803
35.0 15782.084325516566
36.0 16233.001020531327
37.0 16683.917715546086
38.0 17134.834410560845
39.0 17585.751105575604
40.0 18036.66780059036
41.0 18487.58449560512
42.0 18938.50119061988
43.0 19389.417885634637
44.0 19840.3345806494
45.0 20291.25127566416
46.0 20742.167970678915
47.0 21193.084665693674
48.0 21644.001360708437
49.0 22094.918055723192
50.0 22545.834750737955
51.0 22996.751445752714
52.0 23447.66814076747
53.0 23898.58483578223
54.0 24349.501530796988
55.0 24800.418225811747
56.0 25251.3349208265
57.0 25702.251615841265
58.0 26153.168310856025
59.0 26604.08500587078
60.0 27055.001700885543
61.0 27505.9183959003
62.0 27956.83509091506
63.0 28407.75178592982
64.0 28858.66848094458
65.0 29309.58517595934
66.0 29760.501870974098
67.0 30211.418565988854
68.0 30662.335261003605
69.0 31113.251956018375
70.0 31564.16865103313
71.0 32015.08534604789
72.0 32466.002041062653
73.0 32916.91873607741
74.0 33367.83543109217
75.0 33818.75212610693
76.0 34269.66882112169
77.0 34720.585516136445
78.0 35171.50221115121
79.0 35622.41890616596
80.0 36073.33560118072
81.0 36524.25229619548
82.0 36975.16899121024
83.0 37426.085686225
84.0 37877.00238123976
85.0 38327.91907625452
86.0 38778.835771269274
87.0 39229.75246628404
88.0 39680.6691612988
89.0 40131.58585631355
90.0 40582.50255132832
91.0 41033.41924634307
92.0 41484.33594135783
93.0 41935.2526363726
94.0 42386.16933138735
95.0 42837.08602640211
96.0 43288.00272141687
97.0 43738.91941643163
98.0 44189.836111446384
99.0 44640.75280646115
100.0 45091.66950147591
30.0 11398.407669950646
31.0 11778.354592282334
32.0 12158.301514614019
33.0 12538.248436945707
34.0 12918.195359277393
35.0 13298.142281609082
36.0 13678.089203940774
37.0 14058.036126272462
38.0 14437.98304860415
39.0 14817.929970935835
40.0 15197.876893267525
41.0 15577.823815599213
42.0 15957.770737930905
43.0 16337.71766026259
44.0 16717.664582594276
45.0 17097.611504925964
46.0 17477.558427257653
47.0 17857.50534958934
48.0 18237.452271921033
49.0 18617.399194252717
50.0 18997.346116584406
51.0 19377.293038916094
52.0 19757.239961247782
53.0 20137.18688357947
54.0 20517.133805911155
55.0 20897.080728242847
56.0 21277.02765057453
57.0 21656.97457290622
58.0 22036.92149523791
59.0 22416.868417569596
60.0 22796.81533990129
61.0 23176.762262232973
62.0 23556.70918456467
63.0 23936.65610689635
64.0 24316.603029228037
65.0 24696.549951559733
66.0 25076.496873891414
67.0 25456.443796223102
68.0 25836.390718554787
69.0 26216.337640886482
70.0 26596.284563218163
71.0 26976.23148554985
72.0 27356.178407881547
73.0 27736.125330213228
74.0 28116.072252544924
75.0 28496.019174876605
76.0 28875.9660972083
77.0 29255.91301953999
78.0 29635.85994187167
79.0 30015.806864203365
80.0 30395.75378653505
81.0 30775.70070886674
82.0 31155.647631198426
83.0 31535.594553530118
84.0 31915.54147586181
85.0 32295.488398193495
86.0 32675.43532052518
87.0 33055.38224285687
88.0 33435.32916518855
89.0 33815.276087520244
90.0 34195.22300985193
91.0 34575.16993218362
92.0 34955.116854515305
93.0 35335.063776847
94.0 35715.01069917868
95.0 36094.95762151037
96.0 36474.904543842065
97.0 36854.85146617374
98.0 37234.798388505435
99.0 37614.74531083712
100.0 37994.69223316881
30.0 9769.986854442584
31.0 10095.653082924004
32.0 10421.319311405423
33.0 10746.985539886842
34.0 11072.65176836826
35.0 11398.317996849679
36.0 11723.984225331102
37.0 12049.65045381252
38.0 12375.31668229394
39.0 12700.982910775358
40.0 13026.649139256775
41.0 13352.315367738196
42.0 13677.981596219619
43.0 14003.647824701038
44.0 14329.314053182456
45.0 14654.980281663873
46.0 14980.646510145294
47.0 15306.312738626713
48.0 15631.978967108134
49.0 15957.645195589552
50.0 16283.311424070971
51.0 16608.977652552392
52.0 16934.64388103381
53.0 17260.31010951523
54.0 17585.97633799665
55.0 17911.642566478073
56.0 18237.308794959485
57.0 18562.975023440908
58.0 18888.641251922327
59.0 19214.307480403742
60.0 19539.97370888517
61.0 19865.639937366588
62.0 20191.306165848007
63.0 20516.972394329423
64.0 20842.638622810846
65.0 21168.304851292265
66.0 21493.971079773684
67.0 21819.6373082551
68.0 22145.30353673652
69.0 22470.969765217942
70.0 22796.635993699358
71.0 23122.30222218078
72.0 23447.968450662203
73.0 23773.63467914362
74.0 24099.30090762504
75.0 24424.967136106457
76.0 24750.63336458788
77.0 25076.299593069296
78.0 25401.965821550715
79.0 25727.632050032134
80.0 26053.29827851355
81.0 26378.964506994973
82.0 26704.630735476392
83.0 27030.29696395781
84.0 27355.963192439238
85.0 27681.629420920654
86.0 28007.295649402076
87.0 28332.961877883485
88.0 28658.62810636491
89.0 28984.29433484633
90.0 29309.960563327746
91.0 29635.62679180917
92.0 29961.293020290588
93.0 30286.95924877201
94.0 30612.625477253427
95.0 30938.29170573485
96.0 31263.95793421627
97.0 31589.624162697684
98.0 31915.290391179104
99.0 32240.95661966052
100.0 32566.622848141942
30.0 8495.548308716461
31.0 8778.733252340344
32.0 9061.918195964226
33.0 9345.10313958811
34.0 9628.288083211988
35.0 9911.47302683587
36.0 10194.657970459753
37.0 10477.842914083636
38.0 10761.027857707517
39.0 11044.2128013314
40.0 11327.397744955282
41.0 11610.582688579165
42.0 11893.767632203046
43.0 12176.95257582693
44.0 12460.13751945081
45.0 12743.32246307469
46.0 13026.507406698573
47.0 13309.692350322455
48.0 13592.87729394634
49.0 13876.062237570219
50.0 14159.247181194101
51.0 14442.432124817984
52.0 14725.617068441867
53.0 15008.802012065747
54.0 15291.98695568963
55.0 15575.171899313513
56.0 15858.35684293739
57.0 16141.541786561274
58.0 16424.72673018516
59.0 16707.911673809038
60.0 16991.096617432922
61.0 17274.281561056803
62.0 17557.466504680688
63.0 17840.65144830457
64.0 18123.836391928453
65.0 18407.021335552337
66.0 18690.20627917622
67.0 18973.391222800095
68.0 19256.576166423976
69.0 19539.76111004786
70.0 19822.94605367174
71.0 20106.130997295622
72.0 20389.315940919507
73.0 20672.500884543388
74.0 20955.685828167272
75.0 21238.870771791153
76.0 21522.055715415034
77.0 21805.24065903892
78.0 22088.4256026628
79.0 22371.61054628668
80.0 22654.795489910564
81.0 22937.980433534445
82.0 23221.16537715833
83.0 23504.35032078221
84.0 23787.53526440609
85.0 24070.720208029976
86.0 24353.90515165386
87.0 24637.090095277734
88.0 24920.27503890162
89.0 25203.4599825255
90.0 25486.64492614938
91.0 25769.829869773264
92.0 26053.014813397145
93.0 26336.199757021033
94.0 26619.38470064491
95.0 26902.569644268795
96.0 27185.75458789268
97.0 27468.939531516557
98.0 27752.124475140437
99.0 28035.30941876432
100.0 28318.494362388203