@martinfrasch
Last active December 13, 2021 05:06
Compute 62 HRV metrics using NeuroKit2
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Read all data\n",
"as peaks [for artifact removal and most HRV metrics] and as RRI [for nonlinear HRV metrics] to be computed in neurokit2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2021-09-30T00:50:32.586642Z",
"start_time": "2021-09-30T00:50:32.355709Z"
}
},
"outputs": [],
"source": [
"# call conda datanalysis environment\n",
"!conda init bash"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2021-09-30T00:50:40.963260Z",
"start_time": "2021-09-30T00:50:40.754473Z"
},
"scrolled": true
},
"outputs": [],
"source": [
"!conda activate datanalysis #or use your own preferred venv"
]
},
{
"cell_type": "code",
"execution_count": 105,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-03T04:59:16.929658Z",
"start_time": "2021-10-03T04:59:16.926890Z"
}
},
"outputs": [],
"source": [
"# Do all package imports\n",
"\n",
"import neurokit2 as nk\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import os\n",
"import scipy.io\n",
"from pathlib import Path\n",
"from scipy.stats import variation\n",
"from hmmlearn import hmm"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"ExecuteTime": {
"end_time": "2021-09-30T17:33:55.425571Z",
"start_time": "2021-09-30T17:33:55.423955Z"
}
},
"outputs": [],
"source": [
"# load Matlab data files\n",
"from scipy.io import loadmat"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2021-09-30T17:20:14.045489Z",
"start_time": "2021-09-30T17:20:14.041563Z"
},
"scrolled": true
},
"outputs": [],
"source": [
"os.getcwd()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"ExecuteTime": {
"end_time": "2021-09-30T17:22:01.451145Z",
"start_time": "2021-09-30T17:22:01.449338Z"
}
},
"outputs": [],
"source": [
"# Import raw peaks from the mat files; adjust to fit your input data format\n",
"\n",
"f_filepath_peaks=Path.cwd()/\"raw_peaks/f\" #fetal raw peaks mat files;\n",
"m_filepath_peaks=Path.cwd()/\"raw_peaks/m\" #maternal raw peaks mat files;"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"ExecuteTime": {
"end_time": "2021-09-30T17:27:21.396867Z",
"start_time": "2021-09-30T17:27:21.393211Z"
}
},
"outputs": [],
"source": [
"f_peaks_files = [f for f in sorted(f_filepath_peaks.iterdir()) #create a list of relevant files in directory\n",
" if f.suffix == '.mat']\n",
"m_peaks_files = [f for f in sorted(m_filepath_peaks.iterdir()) #create a list of relevant files in directory\n",
" if f.suffix == '.mat']"
]
},
{
"cell_type": "code",
"execution_count": 48,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-02T00:36:43.348624Z",
"start_time": "2021-10-02T00:36:43.342688Z"
},
"run_control": {
"marked": false
}
},
"outputs": [],
"source": [
"# Read one file at a time using the above list, trim, clean, convert to RRI\n",
"# The present syntax is for a specific ECG format; adopt to your use case\n",
"# Iterate over i files in the f_ or m_ peaks_files lists and extract the correct peaks channel as numpy array\n",
"# Proceed with the steps below: HRV compute, save\n",
"\n",
"def read_mat_file(f_peaks_file, m_peaks_file):\n",
" # Import 5th row of the mat file's peak data which has 1000 Hz sampling rate\n",
" f_file_PEAK_raw=loadmat(f_peaks_file)\n",
" m_file_PEAK_raw=loadmat(m_peaks_file)\n",
"\n",
" f_peaks=f_file_PEAK_raw['fetal_Rpeaks'][4] #this is my 5th row ECG-SAVER-extracted peaks channel\n",
" m_peaks=m_file_PEAK_raw['mother_Rpeaks'][4] #this is my 5th row\n",
" \n",
" # Trim trailing zeros\n",
" f_peaks_trimmed=np.trim_zeros(f_peaks,trim='b')\n",
" m_peaks_trimmed=np.trim_zeros(m_peaks,trim='b')\n",
" \n",
" # Artifact removal [see next section for details]\n",
" f_clean_peaks=nk.signal_fixpeaks(f_peaks_trimmed, sampling_rate=1000, iterative=False, show=False,interval_min=0.33,interval_max=0.75, method=\"kubios\") #allow 80-180 bpm\n",
" m_clean_peaks=nk.signal_fixpeaks(m_peaks_trimmed, sampling_rate=1000, iterative=False, show=False,interval_min=0.4,interval_max=1.5, method=\"kubios\") #allow 40-150 bpm\n",
" \n",
" # Document artifacts from each run as clean_peaks_rri[0]: build a dataframe for each file over all segments\n",
" \n",
" # Convert to RRI\n",
" f_rri = peaks_to_rri(f_clean_peaks[1], sampling_rate=1000, interpolate=False)\n",
" m_rri = peaks_to_rri(m_clean_peaks[1], sampling_rate=1000, interpolate=False)\n",
" \n",
" return f_clean_peaks[1], m_clean_peaks[1], f_rri, m_rri, f_clean_peaks[0], m_clean_peaks[0]"
]
},
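{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Single-file smoke test for read_mat_file (illustrative sketch; assumes the file lists\n",
"# above are non-empty, that peaks_to_rri from the next cell has already been run, and\n",
"# that the .mat channel layout matches the UDF)\n",
"f_pk, m_pk, f_rr, m_rr, f_art, m_art = read_mat_file(f_peaks_files[0], m_peaks_files[0])\n",
"print(len(f_pk), len(m_pk))   # number of corrected peaks per recording\n",
"print(f_rr[:5], m_rr[:5])     # first few RR intervals in ms"
]
},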
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"ExecuteTime": {
"end_time": "2021-09-30T18:30:16.366200Z",
"start_time": "2021-09-30T18:30:16.362777Z"
}
},
"outputs": [],
"source": [
"# Some NK functions [clean peaks function, complexity HRV metrics] take RRIs \n",
"# So use these UDFs borrowed from the NK package: convert peaks to RRI on the cleaned peaks output\n",
"\n",
"def peaks_to_rri(peaks=None, sampling_rate=1000, interpolate=False, **kwargs):\n",
"\n",
" rri = np.diff(peaks) / sampling_rate * 1000\n",
"\n",
" if interpolate is False:\n",
" return rri\n",
"\n",
" else:\n",
"\n",
" # Minimum sampling rate for interpolation\n",
" if sampling_rate < 10:\n",
" sampling_rate = 10\n",
"\n",
" # Compute length of interpolated heart period signal at requested sampling rate.\n",
" desired_length = int(np.rint(peaks[-1]))\n",
"\n",
" rri = signal_interpolate(\n",
" peaks[1:], # Skip first peak since it has no corresponding element in heart_period\n",
" rri,\n",
" x_new=np.arange(desired_length),\n",
" **kwargs\n",
" )\n",
" return rri, sampling_rate"
]
},
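{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Quick sanity check for peaks_to_rri (illustrative only, not part of the pipeline):\n",
"# with peaks given as sample indices at 1000 Hz, a constant 800-sample spacing\n",
"# should yield RR intervals of 800 ms (75 bpm).\n",
"example_peaks = np.arange(0, 10000, 800)  # hypothetical, evenly spaced peaks\n",
"example_rri = peaks_to_rri(example_peaks, sampling_rate=1000, interpolate=False)\n",
"print(example_rri)  # expected: all values equal to 800.0 ms"
]
},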
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true
},
"source": [
"# Artifact correction\n",
"Integrated into the above UDF red_mat_file"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true,
"run_control": {
"marked": true
}
},
"outputs": [],
"source": [
"# https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.signal.signal_fixpeaks\n",
"# Artifact removal on peaks using Kubios: write into UDF taking trimmed_peaks input\n",
"# caution: nk.signal_fixpeaks takes peaks, not RRI! \n",
"\n",
"# nk.signal_fixpeaks saves the corrected peak locations to the [1] index of the output data sturcture\n",
"# accessible like so: clean_peaks[1]\n",
"\n",
"# Review the settings for fetal versus maternal RRI inputs! Adjust to match your RRI physiology\n",
"# interval_min – minimum interval btw peaks | interval_max – maximum interval btw peaks.\n",
"\n",
"f_clean_peaks=nk.signal_fixpeaks(f_peaks_trimmed, sampling_rate=1000, iterative=False, show=False,interval_min=0.1,interval_max=0.25, method=\"kubios\")\n",
"m_clean_peaks=nk.signal_fixpeaks(m_peaks_trimmed, sampling_rate=1000, iterative=False, show=False,interval_min=0.1,interval_max=0.25, method=\"kubios\")"
]
},
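{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# Illustrative inspection of the signal_fixpeaks output (assumes the example cell\n",
"# above has been run with trimmed peak arrays in scope):\n",
"# index [0] holds the artifact annotations that are logged per subject later on,\n",
"# index [1] holds the corrected peak locations used for the RRI conversion.\n",
"f_artifacts_info, f_corrected_peaks = f_clean_peaks[0], f_clean_peaks[1]\n",
"print(f_artifacts_info)\n",
"print(f_corrected_peaks[:10])  # first ten corrected peak locations"
]
},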
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# Convert trimmed and cleaned peaks to RRI (using _trimmmed_ raw peaks as input!)\n",
"rri_clean = peaks_to_rri(clean_peaks_peaks[1], sampling_rate=1000, interpolate=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Compute all HRV metrics segment-wise"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-02T00:42:27.354674Z",
"start_time": "2021-10-02T00:42:27.349411Z"
}
},
"outputs": [],
"source": [
"# UDF compute_HRV\n",
"# This UDF computes all [regular and extra non-linear] HRV metrics segment-wise for a file\n",
"\n",
"def compute_HRV(peaks,rri,SubjectID):\n",
" \n",
" # Regular HRV matrix (from peaks)\n",
" \n",
" duration_peaks=peaks[len(peaks)-1] #gives me the duration in samples\n",
"\n",
" divider=duration_peaks/1000/60/5 #sampling_rate, 5 min window segments\n",
" segment=np.array_split(peaks,divider) #divide in segments of 5 min; the last segment may be shorter; discard during statistical analysis on HRV metrics\n",
"\n",
" hrv_segment_df=pd.DataFrame()\n",
"\n",
" for i in range(len(segment)):\n",
" hrv_segment=nk.hrv(segment[i],sampling_rate=1000, show=False)\n",
" hrv_segment_df = pd.concat([hrv_segment_df,hrv_segment],ignore_index=True)\n",
"\n",
" # Additional nonlinear HRV metrics from RRIs\n",
" \n",
" segment=np.array_split(rri,divider) #divide _RRI_ in segments of 5 min; the last segment may be shorter; discard during statistical analysis on HRV metrics\n",
"\n",
" #create my dataframe structure to which to append the list as a row in the following\n",
" hrv_extra_columns=['optimal time delay','FuzzEn','FuzzEnMSE','FuzzEnRCMSE','cApEn','segment duration, s','SubjectID']\n",
" hrv_extra_complexity_df=pd.DataFrame(columns=hrv_extra_columns)\n",
" df_length=len(hrv_extra_complexity_df)\n",
" hrv_extra_complexity_df_total=pd.DataFrame(columns=hrv_extra_columns)\n",
"\n",
" for i in range(len(segment)):\n",
" optimal_complexity_parameters = nk.complexity_delay(segment[i], delay_max=100, method='fraser1986', show=False)\n",
" hrv_extra_complexity_segment_fuzen=nk.entropy_fuzzy(segment[i], delay=optimal_complexity_parameters)\n",
" hrv_extra_complexity_segment_fuzen_mse=nk.complexity_fuzzymse(segment[i],fuzzy=True)\n",
" hrv_extra_complexity_segment_fuzen_rcmse=nk.complexity_fuzzyrcmse(segment[i], fuzzy=True, composite=True, refined=True) \n",
" hrv_extra_complexity_segment_capen=nk.entropy_approximate(segment[i], delay=optimal_complexity_parameters, corrected=True)\n",
" segment_duration=np.sum(segment[i])/1000 #segment duration in seconds\n",
" #join all individual output floats including values of segment[i] - i.e., for each segment - and its duration in seconds as numpy.sum(segment[1])/1000 \n",
" hrv_extra_complexity = [optimal_complexity_parameters, hrv_extra_complexity_segment_fuzen,hrv_extra_complexity_segment_fuzen_mse,hrv_extra_complexity_segment_fuzen_rcmse,hrv_extra_complexity_segment_capen,segment_duration,SubjectID]\n",
" hrv_extra_complexity_df.loc[df_length]=hrv_extra_complexity\n",
" hrv_extra_complexity_df_total = pd.concat([hrv_extra_complexity_df_total,hrv_extra_complexity_df],ignore_index=True)\n",
"\n",
" # simply concatenate both df's horizontally; this scales allowing addition of other df's from bivariate computations\n",
" \n",
" hrv_final_df=pd.concat([hrv_segment_df, hrv_extra_complexity_df_total],axis=1)\n",
"\n",
" return hrv_final_df #this is per subject with SubjectID output along on the right side"
]
},
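{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative check of the 5-minute segmentation logic used in compute_HRV\n",
"# (hypothetical numbers; peaks are sample indices at 1000 Hz).\n",
"# A 20-minute recording (1,200,000 samples) should split into 4 segments.\n",
"example_duration_samples = 1200000\n",
"example_divider = example_duration_samples / 1000 / 60 / 5  # -> 4.0 five-minute windows\n",
"example_peak_train = np.arange(0, example_duration_samples, 800)  # hypothetical peaks\n",
"example_segments = np.array_split(example_peak_train, example_divider)\n",
"print(example_divider, [len(s) for s in example_segments])"
]
},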
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Compute higher order HRV metrics\n",
"This includes statistical and HMM estimates"
]
},
{
"cell_type": "code",
"execution_count": 308,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-04T04:46:03.710710Z",
"start_time": "2021-10-04T04:46:03.705366Z"
}
},
"outputs": [],
"source": [
"def compute_basic_stats(ts_data, SubjectID):\n",
"\n",
" # compute mean and variation\n",
" # assuming \"ts_data\" is where my HRV metric values list is per subject\n",
" \n",
" HRV_mean=np.mean(ts_data.values.tolist())\n",
" \n",
" HRV_coeff_variation=variation(ts_data.values.tolist())\n",
" # this function works similar to variation() but works purely with numpy\n",
" # cv = lambda x: np.std(x) / np.mean(x)\n",
"\n",
" # First quartile (Q1) \n",
" Q1 = np.percentile(ts_data, 25, interpolation = 'midpoint') \n",
" # Third quartile (Q3) \n",
" Q3 = np.percentile(ts_data, 75, interpolation = 'midpoint') \n",
" # Interquaritle range (IQR) \n",
" IQR = Q3 - Q1 \n",
" midhinge = (Q3 + Q1)/2\n",
" quartile_coefficient_dispersion = (IQR/2)/midhinge\n",
" \n",
" # adding entropy estimate; this is experimental!\n",
" # HRV_ts_entropy=nk.entropy_sample(ts_data) \n",
" # yielding error \"could not broadcast input array from shape (7,1) into shape (7)\" | the following syntax fixes that and is more elegant in that it estimates optimal delay\n",
" # optimal_complexity_parameters = nk.complexity_delay(ts_data.to_numpy, delay_max=6, method='fraser1986', show=False)\n",
" # HRV_ts_entropy=nk.entropy_fuzzy(ts_data.to_numpy, delay=optimal_complexity_parameters)\n",
" # still yielding len error \n",
" HRV_ts_entropy=nk.entropy_shannon(ts_data)\n",
" \n",
" basic_stats=[SubjectID, HRV_mean, HRV_coeff_variation[0], quartile_coefficient_dispersion, HRV_ts_entropy]\n",
" return basic_stats"
]
},
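{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal usage sketch for compute_basic_stats (hypothetical values).\n",
"# It mirrors how compute_higher_HRV calls it: one HRV metric as a single-column\n",
"# DataFrame of segment-wise values, plus a subject label.\n",
"example_metric = pd.DataFrame({\"HRV_RMSSD\": [42.1, 39.8, 45.3, 41.0, 44.2]})\n",
"print(compute_basic_stats(example_metric, \"Subject_example\"))\n",
"# returns [SubjectID, mean, coefficient of variation, quartile coefficient of dispersion, Shannon entropy]"
]
},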
{
"cell_type": "code",
"execution_count": 277,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-04T01:17:02.353756Z",
"start_time": "2021-10-04T01:17:02.351648Z"
},
"run_control": {
"marked": false
}
},
"outputs": [],
"source": [
"#HMM Model\n",
"\n",
"def do_hmm(ts_data):\n",
" \n",
" #ts_data=numpy.array(data)\n",
" gm = hmm.GaussianHMM(n_components=2)\n",
" gm.fit(ts_data.reshape(-1, 1))\n",
" hmm_states = gm.predict(ts_data.reshape(-1, 1))\n",
" #hmm_states=[states.tolist()]\n",
" print(hmm_states)\n",
" \n",
" return hmm_states # next, add _states_ iteratively for all subjects to states_Uber list to spot patterns"
]
},
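{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative run of do_hmm on synthetic data (hypothetical values):\n",
"# a series switching between two Gaussian regimes should map onto the two hidden states.\n",
"rng = np.random.default_rng(0)\n",
"example_series = np.concatenate([rng.normal(50, 2, 100), rng.normal(80, 2, 100)])\n",
"example_states = do_hmm(example_series)\n",
"print(np.unique(example_states))  # expected: [0 1] (state labels may be swapped)"
]
},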
{
"cell_type": "code",
"execution_count": 185,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-03T20:56:47.422616Z",
"start_time": "2021-10-03T20:56:47.417401Z"
}
},
"outputs": [],
"source": [
"# deal with last column which is string and needs to be skipped\n",
"\n",
"def skip_last_column(lst):\n",
" # unpack the list of lists\n",
" def Extract(lst):\n",
" return [item[0] for item in lst]\n",
" # check for string in the first sublist (all I need to decide to skip it for numpy operations)\n",
" element_to_check=Extract(lst)[0]\n",
" return isinstance(element_to_check, str) #return Boolean for presence of string in the sublist"
]
},
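{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Quick check of skip_last_column (illustrative): it receives the .values of a\n",
"# single-column slice, i.e. a list of one-element lists, and returns True only\n",
"# when that column holds strings (the SubjectID column).\n",
"print(skip_last_column([[1.0], [2.5], [3.1]]))   # False -> numeric column, keep\n",
"print(skip_last_column([[\"sub01\"], [\"sub01\"]]))  # True  -> SubjectID column, skip"
]
},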
{
"cell_type": "code",
"execution_count": 278,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-04T01:21:35.307648Z",
"start_time": "2021-10-04T01:21:35.303408Z"
},
"run_control": {
"marked": false
}
},
"outputs": [],
"source": [
"def compute_higher_HRV(hrv_final_df, SubjectID): \n",
" \n",
" # assuming \"hrv_final_df\" is the dataframe where the HRV metric values are listed segment-wise per subject\n",
" \n",
" # compute basic stats\n",
" \n",
" higher_order_HRV_basic_stats=[]\n",
" \n",
" for i in range(hrv_final_df.shape[1]): #last column is the SubjectID string, so skipping it below\n",
" \n",
" hrv_metric=hrv_final_df.iloc[:,[i]].values\n",
" \n",
" #String skip logic to skip over SubjectID column\n",
" if skip_last_column(hrv_final_df.iloc[:,[i]].values) == False:\n",
" HRV_results_temp1=compute_basic_stats(hrv_final_df.iloc[:,[i]].astype(np.float64),SubjectID)\n",
" higher_order_HRV_basic_stats.append(HRV_results_temp1)\n",
" \n",
" else:\n",
" i+=1\n",
"\n",
" HRV_basic_stats=pd.DataFrame(higher_order_HRV_basic_stats, columns=['SubjectID','HRV_mean', 'HRV_coeff_variation', 'quartile_coefficient_dispersion','HRV metrics entropy'])\n",
" hrv_columns=hrv_final_df.columns[0:63] #make sure I don't select the last column which has SubjectID\n",
" HRV_basic_stats.index=[hrv_columns]\n",
" HRV_basic_stats_final=HRV_basic_stats.T #transpose\n",
" \n",
" # compute HMM stats: computing on just 7 data points leads to errors in some instances, so omit for now and revisit later when used on longer HRV metrics time series, say, several hours \n",
" \n",
" # Estimate HMM probabilities output for a given segmented HRV metric\n",
" # Then compute basic_stats on this estimate;\n",
" # Hypothesis: stable tracings will have tight distributions of HMM values and resemble entropy estimates;\n",
" # This will apply statistically significantly for physiologically stressed (tighter distributions) versus control subjects\n",
"\n",
" #higher_order_HRV_basic_stats_on_HMM=[]\n",
" \n",
" \n",
" #for i in range(hrv_final_df.shape[1]): #last column is the SubjectID string, so removing it\n",
" \n",
" # hrv_metric=hrv_final_df.iloc[:,[i]].values\n",
" # print(\"HRV_metric has the type\", type(hrv_metric))\n",
" # some HRV metrics have NaNs and the \"do_hmm\" script crashes on those; \n",
" # Adding logic to skip if NaN is present\n",
" # a=any(pd.isna(hrv_metric)) #checking if _any_ values in HRV metrics list are NaN\n",
" # b=skip_last_column(hrv_metric)\n",
" # skip_reasons={a:'True', b:'True'}\n",
" #NaN or string skip logic\n",
" # if any(skip_reasons):\n",
" # i+=1\n",
" # else:\n",
" # HRV_results_hmm_temp2=do_hmm(hrv_metric)\n",
" # print(HRV_results_hmm_temp2)\n",
" # print(type(HRV_results_hmm_temp2))\n",
" # HRV_results_stats_hmm_temp=compute_basic_stats(HRV_results_hmm_temp2.tolist(),SubjectID) #j being the file number; != SubjectID\n",
" # higher_order_HRV_basic_stats_on_HMM.append(HRV_results_stats_hmm_temp)\n",
" \n",
" #HRV_basic_stats_on_HMM=pd.DataFrame(higher_order_HRV_basic_stats_on_HMM, columns=['HRV_HMM_mean', 'HRV_HMM_coeff_variation', 'HMM_quartile_coefficient_dispersion','HMM_HRV metrics entropy'])\n",
" #HRV_basic_stats_on_HMM.index=[hrv_columns]\n",
" #HRV_basic_stats_on_HMM_final=HRV_basic_stats_on_HMM.T #transpose\n",
" \n",
" #higher_hrv_final_df=pd.concat([HRV_basic_stats_final, HRV_basic_stats_on_HMM_final],axis=1) \n",
" \n",
" higher_hrv_final_df=HRV_basic_stats_final #leaving the syntax above for when the data allow HMM analysis \n",
" \n",
" return higher_hrv_final_df #this includes SubjectID"
]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true
},
"source": [
"# Combine and save everything: HRV plus higher order spreadsheets"
]
},
{
"cell_type": "markdown",
"metadata": {
"hidden": true
},
"source": [
"UDF save_results: currently simply called in \"Execute\" section below"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# Save the combined dataframe as spreadsheet like so\n",
"\n",
"hrv_segment_df.to_excel(\"/analysis/HRV_metrics.xlsx\")\n",
"\n",
"# Save the combined artifact log dataframe as spreadsheet\n",
"\n",
"f_artifacts_log_df.to_excel(\"/analysis/artifacts_log.xlsx\")\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Execute the entire analysis\n",
"\n",
"For each file (fetal and maternal):\n",
"- call read_mat_file\n",
"- call compute_HRV\n",
"- save results to Excel"
]
},
{
"cell_type": "code",
"execution_count": 311,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-04T05:43:16.280920Z",
"start_time": "2021-10-04T04:47:52.307605Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Computation completed.\n"
]
}
],
"source": [
"# Initialize data structures\n",
"f_artifacts_log=[]\n",
"m_artifacts_log=[]\n",
"Uber_fHRV=[]\n",
"Uber_mHRV=[]\n",
"Uber_higher_fHRV=[]\n",
"Uber_higher_mHRV=[]\n",
"\n",
"i=0\n",
"\n",
"# Compute & save into lists\n",
"\n",
"while i<=len(f_peaks_files)-1: #careful - this assumes equal number of fetal and maternal raw files\n",
"\n",
" # read the peaks file, trim trailing zeros, artifact correct it, convert to RRIs and return the results \n",
" f_clean_peaks, m_clean_peaks, f_rri, m_rri, f_clean_peaks_artifacts, m_clean_peaks_artifacts=read_mat_file(f_peaks_files[i],m_peaks_files[i]) \n",
" \n",
" fSubjectID=format(f_peaks_files[i].stem)\n",
" mSubjectID=format(m_peaks_files[i].stem)\n",
" \n",
" f_artifacts_log_i=[fSubjectID,f_clean_peaks_artifacts]\n",
" m_artifacts_log_i=[mSubjectID,m_clean_peaks_artifacts]\n",
" \n",
" #save artifact processing log from each file starting with its real SubjectID\n",
" f_artifacts_log.append(f_artifacts_log_i)\n",
" m_artifacts_log.append(m_artifacts_log_i)\n",
" \n",
" # compute all HRV metrics\n",
" fHRV_final=compute_HRV(f_clean_peaks,f_rri,fSubjectID) \n",
" mHRV_final=compute_HRV(m_clean_peaks,m_rri,mSubjectID)\n",
" \n",
" # update the UBER df\n",
" Uber_fHRV.append(fHRV_final) \n",
" Uber_mHRV.append(mHRV_final)\n",
" \n",
" # compute higher_order HRV metrics\n",
" fHRV_higher_final=compute_higher_HRV(fHRV_final,fSubjectID)\n",
" mHRV_higher_final=compute_higher_HRV(mHRV_final,mSubjectID)\n",
" \n",
" # update the UBER_higher_df\n",
" Uber_higher_fHRV.append(fHRV_higher_final) \n",
" Uber_higher_mHRV.append(mHRV_higher_final) \n",
" \n",
" i+=1\n",
" \n",
" if i>len(f_peaks_files): \n",
" break\n",
" \n",
"print('Computation completed.')"
]
},
{
"cell_type": "code",
"execution_count": 312,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-04T06:03:12.544580Z",
"start_time": "2021-10-04T06:03:12.480039Z"
}
},
"outputs": [],
"source": [
"# save artifacts logs\n",
"df_Uber_f_artifacts = pd.DataFrame.from_records(f_artifacts_log) #edit the name as needed\n",
"df_Uber_m_artifacts = pd.DataFrame.from_records(m_artifacts_log) #edit the name as needed\n",
"\n",
"df_Uber_f_artifacts.to_excel('analysis/fHRV_UBER_artifacts_log.xlsx', index=False)\n",
"df_Uber_m_artifacts.to_excel('analysis/mHRV_UBER_artifacts_log.xlsx', index=False)"
]
},
{
"cell_type": "code",
"execution_count": 313,
"metadata": {
"ExecuteTime": {
"end_time": "2021-10-04T06:03:51.909285Z",
"start_time": "2021-10-04T06:03:49.152513Z"
}
},
"outputs": [],
"source": [
"# save HRV results\n",
"Uber_fHRV_df=pd.concat(Uber_fHRV)\n",
"Uber_fHRV_df.to_excel(\"analysis/fHRV_metrics.xlsx\")\n",
"\n",
"Uber_mHRV_df=pd.concat(Uber_mHRV)\n",
"Uber_mHRV_df.to_excel(\"analysis/mHRV_metrics.xlsx\")\n",
"\n",
"Uber_higher_fHRV_df=pd.concat(Uber_higher_fHRV)\n",
"Uber_higher_fHRV_df.to_excel(\"analysis/higher_fHRV_metrics.xlsx\")\n",
"\n",
"Uber_higher_mHRV_df=pd.concat(Uber_higher_mHRV)\n",
"Uber_higher_mHRV_df.to_excel(\"analysis/higher_mHRV_metrics.xlsx\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"hide_input": false,
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"oldHeight": 185,
"position": {
"height": "40px",
"left": "1072px",
"right": "20px",
"top": "122px",
"width": "447px"
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"varInspector_section_display": "none",
"window_display": true
}
},
"nbformat": 4,
"nbformat_minor": 2
}

This is an adaptation of NeuroKit2 that takes RR intervals or peak times from fetal and maternal heart rate data as input and outputs 60+ HRV measures, including optimal time delay-based complexity measures with a user-definable time window length. Huge thanks to the NeuroKit team!
