{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import StandardScaler\n",
"from tensorflow.keras.utils import to_categorical\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Dense\n",
"from tensorflow.keras.optimizers import Adam, SGD\n",
"from tensorflow.keras.callbacks import EarlyStopping"
]
},
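{
"cell_type": "markdown",
"metadata": {},
"source": [
"The seeding cell below is an addition, not part of the original notebook: fixing the NumPy and TensorFlow random seeds makes weight initialization and shuffling repeatable, so the four models can be compared on a more even footing. (Exact reproducibility can still vary across hardware and TensorFlow versions.)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added for reproducibility (not in the original notebook).\n",
"# Fixing the seeds makes weight initialization and shuffling repeatable.\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
},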
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load dataset\n",
"iris_df = pd.read_csv('iris_dataset.csv')\n",
"\n",
"# Feature set (X) and target labels (y)\n",
"X_features = iris_df.iloc[:, :-1].values\n",
"y_labels = iris_df.iloc[:, -1].values\n",
"\n",
"# Split into training and testing sets (80/20)\n",
"X_train_set, X_test_set, y_train_set, y_test_set = train_test_split(X_features, y_labels, test_size=0.2, random_state=42, stratify=y_labels)\n",
"\n",
"# Standardize the feature data\n",
"scaler_instance = StandardScaler()\n",
"X_train_set = scaler_instance.fit_transform(X_train_set)\n",
"X_test_set = scaler_instance.transform(X_test_set)\n",
"\n",
"# Convert labels to one-hot encoded format\n",
"y_train_set_onehot = to_categorical(y_train_set, num_classes=3)\n",
"y_test_set_onehot = to_categorical(y_test_set, num_classes=3)\n",
"\n",
"# Early stopping configuration\n",
"early_stop_callback = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)\n"
]
},
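{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (added, not in the original notebook): `to_categorical` above assumes the label column is already integer-encoded as 0, 1, 2. If the CSV stores species names as strings, they would need to be encoded first (e.g. with `sklearn.preprocessing.LabelEncoder`). The cell below just prints the shapes and label values so that assumption is visible before training."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity check (added cell): to_categorical above assumes integer labels 0, 1, 2.\n",
"# If the CSV stored species names as strings, they would need encoding first\n",
"# (e.g. sklearn.preprocessing.LabelEncoder).\n",
"print('X_train_set:', X_train_set.shape, ' X_test_set:', X_test_set.shape)\n",
"print('Unique labels:', np.unique(y_labels))\n",
"print('One-hot train shape:', y_train_set_onehot.shape)"
]
},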
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 1st Model\n",
"Using Adam optimizer and two hidden layers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_1 = Sequential([\n",
" Dense(16, input_dim=X_train_set.shape[1], activation='relu'),\n",
" Dense(16, activation='relu'),\n",
" Dense(3, activation='softmax')\n",
"])\n",
"\n",
"model_1.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])\n",
"\n",
"# Training the first model\n",
"history_1 = model_1.fit(X_train_set, y_train_set_onehot, epochs=100, batch_size=8, validation_split=0.2, verbose=1, callbacks=[early_stop_callback])\n",
"\n",
"# Evaluate Model 1\n",
"test_loss_1, test_acc_1 = model_1.evaluate(X_test_set, y_test_set_onehot)\n",
"print(f\"Model 1 - Test Loss: {test_loss_1}, Test Accuracy: {test_acc_1}\")"
]
},
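{
"cell_type": "markdown",
"metadata": {},
"source": [
"A look at the Model 1 loss curves (added cell; it uses the `matplotlib` import above, which is otherwise unused). The point where the curves stop is where early stopping ended training."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Plot training vs. validation loss for Model 1 (added cell)\n",
"plt.figure(figsize=(6, 4))\n",
"plt.plot(history_1.history['loss'], label='training loss')\n",
"plt.plot(history_1.history['val_loss'], label='validation loss')\n",
"plt.xlabel('Epoch')\n",
"plt.ylabel('Categorical cross-entropy')\n",
"plt.title('Model 1 (Adam) training history')\n",
"plt.legend()\n",
"plt.show()"
]
},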
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 2nd Model\n",
"Using SGD optimizer and two hidden layers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"model_2 = Sequential([\n",
" Dense(16, input_dim=X_train_set.shape[1], activation='relu'),\n",
" Dense(16, activation='relu'),\n",
" Dense(3, activation='softmax')\n",
"])\n",
"\n",
"model_2.compile(optimizer=SGD(learning_rate=0.01), loss='categorical_crossentropy', metrics=['accuracy'])\n",
"\n",
"# Training the second model\n",
"history_2 = model_2.fit(X_train_set, y_train_set_onehot, epochs=100, batch_size=8, validation_split=0.2, verbose=1, callbacks=[early_stop_callback])\n",
"\n",
"# Evaluate Model 2\n",
"test_loss_2, test_acc_2 = model_2.evaluate(X_test_set, y_test_set_onehot)\n",
"print(f\"Model 2 - Test Loss: {test_loss_2}, Test Accuracy: {test_acc_2}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 3rd Model\n",
"Using Adam optimizer, Tanh activation, and three hidden layers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_3 = Sequential([\n",
" Dense(32, input_dim=X_train_set.shape[1], activation='tanh'),\n",
" Dense(32, activation='tanh'),\n",
" Dense(32, activation='tanh'),\n",
" Dense(3, activation='softmax')\n",
"])\n",
"\n",
"model_3.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])\n",
"\n",
"# Training the third model\n",
"history_3 = model_3.fit(X_train_set, y_train_set_onehot, epochs=100, batch_size=8, validation_split=0.2, verbose=1, callbacks=[early_stop_callback])\n",
"\n",
"# Evaluate Model 3\n",
"test_loss_3, test_acc_3 = model_3.evaluate(X_test_set, y_test_set_onehot)\n",
"print(f\"Model 3 - Test Loss: {test_loss_3}, Test Accuracy: {test_acc_3}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 4th Model\n",
"Using SGD optimizer, Sigmoid activation, and one hidden layer"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_4 = Sequential([\n",
" Dense(64, input_dim=X_train_set.shape[1], activation='sigmoid'),\n",
" Dense(3, activation='softmax')\n",
"])\n",
"\n",
"model_4.compile(optimizer=SGD(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])\n",
"\n",
"# Training the fourth model\n",
"history_4 = model_4.fit(X_train_set, y_train_set_onehot, epochs=100, batch_size=8, validation_split=0.2, verbose=1, callbacks=[early_stop_callback])\n",
"\n",
"# Evaluate Model 4\n",
"test_loss_4, test_acc_4 = model_4.evaluate(X_test_set, y_test_set_onehot)\n",
"print(f\"Model 4 - Test Loss: {test_loss_4}, Test Accuracy: {test_acc_4}\")"
]
},
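{
"cell_type": "markdown",
"metadata": {},
"source": [
"A side-by-side comparison (added, not part of the original notebook): validation-loss curves for all four models on one plot, followed by their test accuracies. All names used here (`history_1`..`history_4`, `test_acc_1`..`test_acc_4`) come from the cells above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compare all four models (added cell)\n",
"histories = {\n",
" 'Model 1 (Adam, ReLU)': history_1,\n",
" 'Model 2 (SGD, ReLU)': history_2,\n",
" 'Model 3 (Adam, tanh)': history_3,\n",
" 'Model 4 (SGD, sigmoid)': history_4\n",
"}\n",
"plt.figure(figsize=(7, 4))\n",
"for name, hist in histories.items():\n",
"    plt.plot(hist.history['val_loss'], label=name)\n",
"plt.xlabel('Epoch')\n",
"plt.ylabel('Validation loss')\n",
"plt.title('Validation loss across the four models')\n",
"plt.legend()\n",
"plt.show()\n",
"\n",
"for i, acc in enumerate([test_acc_1, test_acc_2, test_acc_3, test_acc_4], start=1):\n",
"    print(f'Model {i}: test accuracy = {acc:.4f}')"
]
},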
{
"cell_type": "markdown",
"metadata": {},
"source": [
"___________"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}