cmkl/fall-2024/math/mat-206/00020/MAT-206-00020.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 215,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import pingouin as pg\n",
"\n",
"from scipy.stats import bartlett, levene"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Reading and preprocessing data"
]
},
{
"cell_type": "code",
"execution_count": 216,
"metadata": {},
"outputs": [],
"source": [
"DataWhr2024 = pd.read_csv(\"DataWhr2024.csv\")\n",
"UnM49 = pd.read_csv(\"UnM49.csv\", sep=';')"
]
},
{
"cell_type": "code",
"execution_count": 217,
"metadata": {},
"outputs": [],
"source": [
"DataWhr2024.loc[DataWhr2024[\"Country name\"].str.startswith(\"Hong\"), \"Country name\"] = \"Hong Kong\"\n",
"DataWhr2024.loc[DataWhr2024[\"Country name\"].str.startswith(\"Somaliland\"), \"Country name\"] = \"Somaliland\"\n",
"DataWhr2024.loc[DataWhr2024[\"Country name\"].str.startswith(\"Taiwan\"), \"Country name\"] = \"Taiwan\""
]
},
{
"cell_type": "code",
"execution_count": 218,
"metadata": {},
"outputs": [],
"source": [
"UnM49 = UnM49[['Country or Area', 'Sub-region Name', 'Region Name']]\n",
"UnM49 = UnM49.rename({'Country or Area':'Country name', 'Sub-region Name':'Subregion', 'Region Name':'Continent'}, axis=1)"
]
},
{
"cell_type": "code",
"execution_count": 219,
"metadata": {},
"outputs": [],
"source": [
"UnM49.loc[97, \"Country name\"] = \"Bolivia\"\n",
"UnM49.loc[33, \"Country name\"] = \"Congo (Brazzaville)\"\n",
"UnM49.loc[34, \"Country name\"] = \"Congo (Kinshasa)\"\n",
"UnM49.loc[124, \"Country name\"] = \"Hong Kong\"\n",
"UnM49.loc[125, \"Country name\"] = \"Macao\"\n",
"UnM49.loc[126, \"Country name\"] = \"North Korea\"\n",
"UnM49.loc[145, \"Country name\"] = \"Iran\"\n",
"UnM49.loc[46, \"Country name\"] = \"Ivory Coast\"\n",
"UnM49.loc[133, \"Country name\"] = \"Laos\"\n",
"UnM49.loc[129, \"Country name\"] = \"South Korea\"\n",
"UnM49.loc[173, \"Country name\"] = \"Moldova\"\n",
"UnM49.loc[217, \"Country name\"] = \"Netherlands\"\n",
"UnM49.loc[175, \"Country name\"] = \"Russia\"\n",
"UnM49.loc[164, \"Country name\"] = \"Syria\"\n",
"UnM49.loc[26, \"Country name\"] = \"Tanzania\"\n",
"UnM49.loc[116, \"Country name\"] = \"United States\"\n",
"UnM49.loc[193, \"Country name\"] = \"United Kingdom\"\n",
"UnM49.loc[111, \"Country name\"] = \"Venezuela\"\n",
"UnM49.loc[140, \"Country name\"] = \"Vietnam\""
]
},
{
"cell_type": "code",
"execution_count": 220,
"metadata": {},
"outputs": [],
"source": [
"_ = pd.DataFrame(\n",
" {\n",
" \"Country name\": [\"Kosovo\", \"Somaliland\", \"Taiwan\"],\n",
" \"Subregion\": [\"Southern Europe\", \"Sub-Saharan Africa\", \"Eastern Asia\"],\n",
" \"Continent\": [\"Europe\", \"Africa\", \"Asia\"],\n",
" }\n",
")\n",
"\n",
"UnM49 = pd.concat([UnM49, _], axis=0)\n",
"UnM49 = UnM49.reset_index(drop=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Merging the datasets"
]
},
{
"cell_type": "code",
"execution_count": 221,
"metadata": {},
"outputs": [],
"source": [
"# Data\n",
"Dat = pd.merge(DataWhr2024, UnM49)\n",
"\n",
"# Data of 2023\n",
"Dat2023 = Dat[Dat['year'] == 2023]\n",
"Dat2023 = Dat2023.reset_index(drop=True)"
]
},
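{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sanity check (quick sketch): `pd.merge` defaults to an inner join on `Country name`, so any country whose name still differs between the two tables is dropped silently. The cell below lists the 2023 countries that would be lost; an empty list means the renaming above covered everything."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Countries reported in the 2023 WHR data but absent from the UN M49 table\n",
"whr_2023_names = set(DataWhr2024.loc[DataWhr2024['year'] == 2023, 'Country name'])\n",
"dropped = sorted(whr_2023_names - set(UnM49['Country name']))\n",
"print(dropped)"
]
},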
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Question 1**"
]
},
{
"cell_type": "code",
"execution_count": 222,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"μSE: 5.678\n",
"\n",
"One-sample t-test result:\n",
" T dof alternative p-val CI95% cohen-d BF10 \\\n",
"T-test -0.075657 8 two-sided 0.941549 [5.02, 6.34] 0.025219 0.322 \n",
"\n",
" power \n",
"T-test 0.050515 \n",
"\n"
]
}
],
"source": [
"# Step 1: Southeast Asia Mean (μSE) and Hypothesis Testing\n",
"Dat2023SEA = Dat2023[Dat2023['Subregion'] == 'South-eastern Asia']['Life Ladder']\n",
"\n",
"mu_se = Dat2023SEA.mean()\n",
"\n",
"t_test_result = pg.ttest(Dat2023SEA, 5.7)\n",
"\n",
"print(f\"μSE: {mu_se:.3f}\\n\")\n",
"print(f\"One-sample t-test result:\\n{t_test_result}\\n\")"
]
},
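{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional cross-check (quick sketch): the same one-sample test can be run with `scipy.stats.ttest_1samp`; its t statistic and p-value should match the `pingouin` result above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Cross-check of the one-sample t-test against the hypothesised mean of 5.7\n",
"from scipy.stats import ttest_1samp\n",
"\n",
"t_stat, p_val = ttest_1samp(Dat2023SEA, popmean=5.7)\n",
"print(f't = {t_stat:.6f}, p = {p_val:.6f}')"
]
},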
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Question 2**"
]
},
{
"cell_type": "code",
"execution_count": 223,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"σ²SE: 0.731, σ²LA: 0.194\n",
"\n",
"Levene's test result:\n",
" W pval equal_var\n",
"levene 3.141025 0.088534 True\n",
"\n"
]
}
],
"source": [
"# Step 2: Variance (σ²SE, σ²LA) and Hypothesis Testing\n",
"Dat2023LA = Dat2023[Dat2023['Subregion'] == 'Latin America and the Caribbean']['Life Ladder']\n",
"\n",
"sigma2_se = Dat2023SEA.var(ddof=1)\n",
"sigma2_la = Dat2023LA.var(ddof=1)\n",
"\n",
"f_test_result = pg.homoscedasticity([Dat2023SEA.values, Dat2023LA.values], method='levene')\n",
"\n",
"print(f\"σ²SE: {sigma2_se:.3f}, σ²LA: {sigma2_la:.3f}\\n\")\n",
"print(f\"Levene's test result:\\n{f_test_result}\\n\")\n"
]
},
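{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional cross-check (quick sketch): `levene` and `bartlett` are imported in the first cell but not used there; calling them directly should closely match the W statistic above, and Bartlett's test offers an alternative that assumes normality."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Cross-check with the scipy functions imported in the first cell\n",
"W, p_levene = levene(Dat2023SEA, Dat2023LA)\n",
"B, p_bartlett = bartlett(Dat2023SEA, Dat2023LA)\n",
"print(f'Levene:   W = {W:.6f}, p = {p_levene:.6f}')\n",
"print(f'Bartlett: B = {B:.6f}, p = {p_bartlett:.6f}')"
]
},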
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Question 3**"
]
},
{
"cell_type": "code",
"execution_count": 224,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"μLA: 6.297\n",
"Two-sample t-test result:\n",
" T dof alternative p-val CI95% cohen-d \\\n",
"T-test -2.040107 10.186481 two-sided 0.068122 [-1.29, 0.06] 1.022676 \n",
"\n",
" BF10 power \n",
"T-test 1.597 0.672925 \n",
"\n"
]
}
],
"source": [
"# Step 3: Mean (μLA) and Hypothesis Testing\n",
"mu_LA = Dat2023LA.values.mean()\n",
"t_test_ind_result = pg.ttest(Dat2023SEA, Dat2023LA)\n",
"\n",
"print(f\"μLA: {mu_LA:.3f}\")\n",
"print(f\"Two-sample t-test result:\\n{t_test_ind_result}\\n\")"
]
},
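{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note on the output: the non-integer degrees of freedom (about 10.19) show that `pingouin` applied Welch's correction, which it does by default when the two group sizes are unequal. As a quick sketch, `scipy.stats.ttest_ind` with `equal_var=False` should reproduce the t statistic and p-value."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Cross-check of the two-sample comparison with scipy's Welch t-test\n",
"from scipy.stats import ttest_ind\n",
"\n",
"t_stat, p_val = ttest_ind(Dat2023SEA, Dat2023LA, equal_var=False)\n",
"print(f'Welch t = {t_stat:.6f}, p = {p_val:.6f}')"
]
},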
{
"cell_type": "code",
"execution_count": 225,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"np.int64(138)"
]
},
"execution_count": 225,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"Dat2023['Continent'].dropna().count()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Question 4**"
]
},
{
"cell_type": "code",
"execution_count": 243,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ANOVA Table:\n",
" Source SS DF MS F p-unc np2\n",
"0 Continent 90.218922 4 22.554730 34.218881 1.271847e-19 0.50718\n",
"1 Within 87.664444 133 0.659131 NaN NaN NaN\n",
"\n",
"Post-Hoc Analysis:\n",
"Means by Continent:\n",
"Continent\n",
"Africa 4.485\n",
"Americas 6.336\n",
"Asia 5.433\n",
"Europe 6.454\n",
"Oceania 7.001\n",
"Name: Life Ladder, dtype: float64\n",
"Intercontinental Mean (μ): 5.621\n",
"Intercontinental Variance (τ²): 0.793\n"
]
}
],
"source": [
"import pingouin as pg\n",
"import pandas as pd\n",
"import numpy as np\n",
"'''\n",
"# Define the mapping of sub-regions to continents\n",
"sub_region_to_continent = {\n",
" 'Southern Asia': 'Asia',\n",
" 'South-eastern Asia': 'Asia',\n",
" 'Eastern Asia': 'Asia',\n",
" 'Central Asia': 'Asia',\n",
" 'Southern Europe': 'Europe',\n",
" 'Western Europe': 'Europe',\n",
" 'Eastern Europe': 'Europe',\n",
" 'Northern Europe': 'Europe',\n",
" 'Latin America and the Caribbean': 'America',\n",
" 'Northern America': 'America',\n",
" 'Sub-Saharan Africa': 'Africa',\n",
" 'Northern Africa': 'Africa',\n",
" 'Australia and New Zealand': 'Oceania'\n",
"}\n",
"\n",
"# Map the 'Subregion' values to continents\n",
"Dat2023['Continent'] = Dat2023['Subregion'].map(sub_region_to_continent)\n",
"'''\n",
"# Drop rows with missing values in 'Continent' or 'Life Ladder'\n",
"anova_data = Dat2023[['Continent', 'Life Ladder']].dropna()\n",
"\n",
"# Perform the ANOVA test\n",
"anova_result = pg.anova(data=anova_data, dv='Life Ladder', between='Continent', detailed=True)\n",
"\n",
"# Print the ANOVA table\n",
"print(\"ANOVA Table:\")\n",
"print(anova_result)\n",
"\n",
"# Extract the relevant ANOVA results for sum of squares (SS)\n",
"ss_between = anova_result['SS'].iloc[0] # Sum of Squares between\n",
"ss_within = anova_result['SS'].iloc[1] # Sum of Squares within\n",
"\n",
"# Extract the degrees of freedom (df) for between and within\n",
"df_between = anova_result['DF'].iloc[0] # Degrees of freedom between\n",
"df_within = anova_result['DF'].iloc[1] # Degrees of freedom within\n",
"\n",
"# Extract the mean squares (MS) for between and within\n",
"ms_between = anova_result['MS'].iloc[0] # Mean square between\n",
"ms_within = anova_result['MS'].iloc[1] # Mean square within\n",
"\n",
"# F-statistic\n",
"f_stat = ms_between / ms_within\n",
"\n",
"# Post-hoc analysis if the null hypothesis is rejected\n",
"if anova_result['p-unc'].iloc[0] < 0.05: # If H0 is rejected\n",
" # Group statistics\n",
" continent_means = anova_data.groupby('Continent')['Life Ladder'].mean()\n",
" # Aggregating count, mean, and variance for each continent group\n",
" DatGroup = anova_data.groupby(\"Continent\")[\"Life Ladder\"].agg([\"count\", \"mean\", \"var\"]).reset_index()\n",
"\n",
" # Extract the necessary columns for calculation\n",
" count_values = DatGroup[\"count\"]\n",
" mean_values = DatGroup[\"mean\"]\n",
" var_values = DatGroup[\"var\"]\n",
"\n",
" # Intercontinental mean (μ) calculation\n",
" n_tot = len(anova_data) # Total number of observations\n",
" J = len(DatGroup) # Number of continents/groups\n",
" n_Bar = n_tot / J # Average sample size per group\n",
"\n",
" mu = anova_data['Life Ladder'].mean()\n",
"\n",
" # Intercontinental Variance (τ²)\n",
" tau_squared = (ms_between - ms_within) / n_Bar\n",
"\n",
" # Print results\n",
" print(\"\\nPost-Hoc Analysis:\")\n",
" print(f\"Means by Continent:\\n{continent_means.round(3)}\")\n",
" print(f\"Intercontinental Mean (μ): {mu:.3f}\")\n",
" print(f\"Intercontinental Variance (τ²): {tau_squared:.3f}\")\n",
"else:\n",
" print(\"\\nGlobal Analysis:\")\n",
" global_mean = anova_data['Life Ladder'].mean()\n",
" global_variance = anova_data['Life Ladder'].var(ddof=1)\n",
" print(f\"Global Mean (θ): {global_mean:.3f}\")\n",
" print(f\"Global Variance (σ²): {global_variance:.3f}\")\n"
]
}
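,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Possible refinement (quick sketch, reusing `ms_between`, `ms_within` and `anova_data` from the cell above): with unequal group sizes, the method-of-moments estimator of τ² often replaces the plain average group size with n0 = (N - sum(n_j^2)/N) / (J - 1); with five continents of very different sizes the two versions can differ."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Alternative tau^2 estimate for unequal group sizes (one-way random-effects,\n",
"# method-of-moments): replace the average group size with\n",
"# n0 = (N - sum(n_j^2)/N) / (J - 1)\n",
"n_j = anova_data.groupby('Continent')['Life Ladder'].count()\n",
"N = n_j.sum()\n",
"J_groups = len(n_j)\n",
"n_0 = (N - (n_j**2).sum() / N) / (J_groups - 1)\n",
"tau_squared_adj = (ms_between - ms_within) / n_0\n",
"print(f'n0 = {n_0:.3f}, tau^2 (unbalanced-adjusted) = {tau_squared_adj:.3f}')"
]
}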
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}