This notebook was developed as part of the manuscript "Response of Lacustrine Glacier Dynamics to Atmospheric Forcing in the Cordillera Darwin" by Langhamer et al. (2024). It demonstrates the basic functions provided in IceDynamics.py to make them accessible to the scientific community. This work is licensed under CC BY 4.0. Use of this dataset, the functions, and/or the notebook requires a reference to the original publication in addition to the Zenodo repository.
The TerminusChange() function calculates mean changes in the ice front position in meters per day by dividing the ice front into n equidistant intervals and calculating changes parallel to the main flow line.
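To illustrate the interval logic, here is a minimal sketch with shapely (an illustration only, not the implementation in IceDynamics.py):
from shapely.geometry import LineString
# Hypothetical 460 m ice-front baseline, subdivided into 2 m intervals
front = LineString([(0, 0), (460, 0)])
interval_dist = 2
n = int(front.length // interval_dist)
points = [front.interpolate(k * interval_dist) for k in range(n + 1)]
# At each of these points, TerminusChange() measures the displacement of the
# digitized ice front parallel to the main flow line between two images.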
The Calving() function estimates the calving flux in m³/s based on the changes in the ice front position and the ice flow velocity estimates from Sentinel-1 (both in meters per day). The required ice thickness estimates are derived from lake depth measurements and digital elevation models generated by unmanned aerial vehicle (UAV) missions.
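Conceptually, the calving rate is the ice flow velocity minus the observed rate of terminus change, and multiplying it by the cross-sectional area (ice thickness times glacier width) yields a volume flux. A minimal sketch of this relation, with the thickness bookkeeping and unit conversion as assumptions (calving_flux_sketch() is a hypothetical helper, not part of IceDynamics.py):
def calving_flux_sketch(u_ice, dL_dt, thickness, width):
    # u_ice:     ice flow velocity in m/d (e.g. from Sentinel-1)
    # dL_dt:     rate of terminus position change in m/d (advance positive)
    # thickness: ice thickness in m (assumed: front height above lake level + lake depth)
    # width:     glacier width perpendicular to the flowline in m
    calving_rate = u_ice - dL_dt  # ice length lost to calving in m/d
    return calving_rate * thickness * width / 86400.0  # volume flux in m³/s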
# Load necessary packages.
import glob
import numpy as np
import scipy
from rasterstats import zonal_stats
import geopandas as gpd
import pandas as pd
import datetime
import plotly.graph_objs as go
import iaaft # iaaft.py needs to be in the same directory as this Jupyter notebook. Download the package from https://github.com/mlcs/iaaft.
'''
Import functions provided by IceDynamics.py.
Note that IceDynamics.py must be in the same directory as this Jupyter notebook.
'''
from IceDynamics import TerminusChange, Calving, peak_count, find_peaks_in_data, significance_test
# folder with center flowline as a shapefile
oggm_wd = './flowline/'
# specify the projection coordinates
projection = '+proj=utm +zone=19 +south +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
# load and project flowline
sen_fl = gpd.read_file(f'{oggm_wd}/sen_flow.shp')
sen_fl_proj = sen_fl.to_crs(projection)
Calculate mean changes of the ice front position along the main flow line. The ice front position is provided as a polyline. We used PyTrX (How et al. 2020), available at https://github.com/PennyHow/PyTrx, to reproject the camera images into a real-world coordinate system.
# provide the paths of the directories containing the data
path_shapes = './ice-front-position/*.shp'
'''
The shapefile name contains information about the image name and date.
Specify the digits in the path where the information is contained.
We count from the end of the path string so that the indices work regardless of the root directory.
'''
img_bgn = -50
img_end = -36
bgn_date = -20
end_date = -4
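# For illustration only (hypothetical file name, not the actual naming scheme):
#   path = './ice-front-position/' + <14-char image name> + ... + <16-char date string> + '.shp'
#   path[img_bgn:img_end]   -> the image name (14 characters, counted from the end)
#   path[bgn_date:end_date] -> the date string (16 characters, counted from the end)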
# Specify the image to which the final changes are related. We have chosen the image
# where the glacier had the greatest advance.
reference_image=4
# distance in m along terminus
interval_dist=2
# Length of the ice front. It should not be greater than the shortest polyline of the ice front position.
# This line will be subdivided into equidistant (interval_dist) intervals.
length=460
# call the function
df_terminus = TerminusChange(path_shapes, length, interval_dist, reference_image, img_bgn, img_end, bgn_date, end_date, sen_fl_proj)
403 of 403
df_terminus.head() # mean changes in meter are stored in a DataFrame with date time index
| | terminus |
| --- | --- |
| 2015-09-13 13:19:00 | 3.918974 |
| 2015-09-22 09:18:00 | 1.662041 |
| 2015-09-28 07:18:00 | 0.931477 |
| 2015-10-03 09:17:00 | 0.880559 |
| 2015-10-08 09:17:00 | 0.000000 |
Plot changes of the ice front position:
# Plot individual measurements
lower_cam = go.Scatter(x=df_terminus.index[:262], y=df_terminus.terminus[:262],
                       mode='markers', marker=dict(size=7), name='lower cam')
upper_cam = go.Scatter(x=df_terminus.index[262:], y=df_terminus.terminus[262:],
                       mode='markers', marker=dict(size=7, color='brown'), name='upper cam')
# Plot the rolling mean (15-day trailing window, shifted back 7.5 days to center it)
lower_cam_mean = go.Scatter(x=df_terminus.index[:262], y=df_terminus.terminus[:262].rolling('15d', min_periods=2).mean().shift(-7.5, freq='d'),
                            line=dict(width=1.5, color='lightblue'), name='15d rolling mean')
upper_cam_mean = go.Scatter(x=df_terminus.index[262:], y=df_terminus.terminus[262:].rolling('15d', min_periods=2).mean().shift(-7.5, freq='d'),
                            line=dict(width=1.5, color='sandybrown'), name='15d rolling mean')
# Create layout
layout = go.Layout(xaxis=dict(title='Date'),
                   yaxis=dict(title='Distance to ice front position (%s) in m' % (df_terminus.index[reference_image].strftime('%Y-%m-%d')),
                              range=[0, 210]),
                   legend=dict(x=0, y=1, traceorder='normal'))
# Combine traces and layout into figure
fig = go.Figure(data=[lower_cam, upper_cam, lower_cam_mean, upper_cam_mean], layout=layout)
# Plot figure
fig.show() # resulting image: ./plots/Terminus.png
Load all the necessary input fields:
# The mean height of the ice front above lake level (in meters) was estimated from DEMs.
height = [24.190792,23.190792,22.429890,21.045753,18.808300,10.903894,7.426261]
date_list = [datetime.datetime(2015,9,18),datetime.datetime(2016,10,5),datetime.datetime(2017,3,27),datetime.datetime(2018,3,20),datetime.datetime(2019,4,19),datetime.datetime(2020,3,10),datetime.datetime(2022,1,17)] # date of the DEM
# Generate a DataFrame containing the mean height of the ice front above lake level.
height_df = pd.DataFrame({'height': height})
height_df.index = date_list
# Load a polygon of an area close to the terminus to estimate the lake depth near the calving front.
lake = gpd.read_file('bathymetry/lake-terminus/LakeTerminus.shp')
# Load lake bathymetry data and calculate the mean depth close to the terminus
lake_depth = zonal_stats(lake,'./bathymetry/bathy_int_clip.tif', stats='mean', all_touched=False)
lake_depth = lake_depth[0]['mean']
# Read all input fields and results provided by Langhamer et al. (2023).
param = pd.read_csv('./data/Parameter-selected.csv', parse_dates=['date'], index_col=['date'])
# Linear interpolation of the ice front position estimates to retrieve daily data
terminus_d = df_terminus.terminus.resample('1d').mean().interpolate('linear')
terminus_d = pd.DataFrame(terminus_d)
# Linear interpolation of ice elevation changes
height_df_d = height_df.resample('1d').mean().interpolate('linear')
terminus_d['height'] = height_df_d
To calculate the calving flux, consider only the time period where data are available:
# Data need to cover same period
sentinel_velo_d = param['sentinel_velo_m/d'][param.index.isin(terminus_d.index)]
terminus_d = terminus_d[terminus_d.index.isin(param.index)]
# Consider data gaps
terminus_d[(terminus_d.index>datetime.datetime(2016,10,5)) & (terminus_d.index<datetime.datetime(2017,3,27))] = np.nan
terminus_d[(terminus_d.index>datetime.datetime(2018,4,1)) & (terminus_d.index<datetime.datetime(2018,11,1))] = np.nan
terminus_d[(terminus_d.index>datetime.datetime(2019,4,19)) & (terminus_d.index<datetime.datetime(2020,3,10))] = np.nan
Calculate the calving flux and store the result in the DataFrame terminus_d.
# Width of the glacier perpendicular to flowline in meters
w = 530
# The function calculates the calving flux in m³/s. A column is added to the input DataFrame terminus_d.
terminus_d = Calving(terminus_d, sentinel_velo_d, w, lake_depth)
Iterated amplitude-adjusted Fourier transform (IAAFT) time-series surrogates are used to test significance. Use the Python module iaaft.py, available at https://github.com/mlcs/iaaft, to generate n surrogates of the signal. Credit goes to Venema et al. (2006). The 95th percentile of the n surrogates is used as the threshold to reject the null hypothesis.
Venema, V., Ament, F., and Simmer, C.: A stochastic iterative amplitude adjusted Fourier transform algorithm with improved accuracy, Nonlin. Processes Geophys., 13, 321–328, https://doi.org/10.5194/npg-13-321-2006, 2006.
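To make the threshold explicit, here is a minimal sketch of the percentile logic (significance_test() in IceDynamics.py is the authoritative implementation; surrogate_threshold() is a hypothetical helper):
def surrogate_threshold(surrogates, statistic, q=95):
    # Evaluate the test statistic on every surrogate series and return its
    # q-th percentile; an observed value above it rejects the null hypothesis.
    stats = [statistic(s) for s in surrogates]
    return np.percentile(stats, q)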
# The investigated signal is the 5-day centered rolling mean of the calving flux
signal_data = terminus_d.calving.rolling(5, center=True).mean()
# Define n-numbers of surrogates
n_sur = 100
# Calculate n surrogates of the original data set
sur_func = iaaft.surrogates(x=signal_data[~np.isnan(signal_data)], ns=n_sur, verbose=True)
100%|██████████████████████████████| 100/100 [00:05<00:00, 16.76it/s]
# Define the required peak width in days, measured at half the peak prominence (the scipy default).
peak_width = 2
# Days before peak which should be taken into consideration
days_offset = 5
# Find the dates within the specified days_offset before a peak
peak_date = find_peaks_in_data(signal_data, peak_width, days_offset)
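For orientation, a hedged sketch of how such an attribution window can be constructed with scipy and pandas (find_peaks_in_data() in IceDynamics.py is the authoritative implementation):
clean = signal_data.dropna()
peaks, _ = scipy.signal.find_peaks(clean.values, width=peak_width)
# Each peak date, together with the days_offset days before it, forms the
# window in which an atmospheric event counts as associated with that peak.
windows = [(clean.index[i] - pd.Timedelta(days=days_offset), clean.index[i])
           for i in peaks]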
# Investigated events contained in param
events = ['RR1_flag','WSDI_flag','CSDI_flag','RRdry_flag','AR_det']
# Iterate through events and print their contribution and significance
for name in events:
    data = param[param[name] == 1]
    # Events must cover the same period as the calving data
    data = data[~data['calving_m3/s'].isna()]
    sigma = significance_test(sur_func, signal_data, data, peak_width, days_offset)
    ratio, dates_in_peak = peak_count(data, peak_date)
    print(name, ':', ratio, 'significant:', ratio > sigma)
RR1_flag : 0.4585635359116022 significant: True
WSDI_flag : 0.5 significant: True
CSDI_flag : 0.35714285714285715 significant: False
RRdry_flag : 0.2857142857142857 significant: False
AR_det : 0.37593984962406013 significant: True
The graph shows the detected peaks of the calving flux and the onset of the atmospheric event associated with each peak.
import plotly.graph_objects as go
# Find peaks in the signal using scipy.signal.find_peaks
idx = scipy.signal.find_peaks(signal_data, width=peak_width)
# Create figure object
fig = go.Figure()
# Plot identified peaks in the signal
for i in idx[0]:
    fig.add_shape(
        type="line",
        x0=signal_data.index[i],
        y0=0,
        x1=signal_data.index[i],
        y1=1,
        line=dict(color='black', width=2),
        name='Peaks'
    )
# Loop over each event type ('RR1_flag', 'WSDI_flag', 'AR_det')
colors = ("green", "red", "blue")
k = 0
for name in ['RR1_flag', 'WSDI_flag', 'AR_det']:
    # Filter data by the event type and remove NaNs from the calving data
    data = param[param[name] == 1]
    data = data[~data['calving_m3/s'].isna()]
    # Calculate significance using the surrogate series
    sigma = significance_test(sur_func, signal_data, data, peak_width, days_offset)
    # Calculate the ratio of calving events associated with a peak
    ratio, dates_in_peak = peak_count(data, peak_date)
    # Plot vertical lines for each event within days_offset days of a peak
    for i in dates_in_peak[1:]:
        fig.add_shape(
            type="line",
            x0=i,
            y0=0,
            x1=i,
            y1=1,
            line=dict(color=colors[k], width=2, dash='dot')
        )
    k = k + 1
# Plot underlying signal (calving)
fig.add_trace(
    go.Scatter(
        x=signal_data.index,
        y=signal_data.values,
        mode='lines',
        name='calving flux'
    )
)
# Label the lines (workaround because add_shape does not allow labels)
fig.add_trace(go.Scatter(x=[dates_in_peak],
                         y=[0],
                         mode='lines',
                         line=dict(color='black'),
                         name='peak'))
colors = ("green", "red", "blue")
label = ('wet spell','warm spell','AR')
for i in range(len(label)):
    fig.add_trace(go.Scatter(x=[dates_in_peak],
                             y=[0],
                             mode='lines',
                             line=dict(color=colors[i], dash='dash'),
                             name=label[i]))
# Set figure layout
fig.update_layout(
    yaxis_title="Calving in m³/s",
    showlegend=True,
)
fig.show() # resulting images: ./plots/Peaks.png and a closer section in ./plots/Peaks-Zoom.png