{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "accelerator": "GPU", "colab": { "provenance": [], "machine_shape": "hm" }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "MVKqY4zjbLzX", "outputId": "b5391179-6c4e-49cb-ea0e-9d380bdd8879" }, "source": [ "gpu_info = !nvidia-smi\n", "gpu_info = '\\n'.join(gpu_info)\n", "if gpu_info.find('failed') >= 0:\n", " print('Select the Runtime > \"Change runtime type\" menu to enable a GPU accelerator, ')\n", " print('and then re-execute this cell.')\n", "else:\n", " print(gpu_info)\n", " #For GPU tunning" ], "execution_count": null, "outputs": [ { "output_type": "stream", "text": [ "Fri Mar 26 19:02:32 2021 \n", "+-----------------------------------------------------------------------------+\n", "| NVIDIA-SMI 460.56 Driver Version: 460.32.03 CUDA Version: 11.2 |\n", "|-------------------------------+----------------------+----------------------+\n", "| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n", "| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n", "| | | MIG M. |\n", "|===============================+======================+======================|\n", "| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |\n", "| N/A 35C P0 28W / 250W | 0MiB / 16280MiB | 0% Default |\n", "| | | N/A |\n", "+-------------------------------+----------------------+----------------------+\n", " \n", "+-----------------------------------------------------------------------------+\n", "| Processes: |\n", "| GPU GI CI PID Type Process name GPU Memory |\n", "| ID ID Usage |\n", "|=============================================================================|\n", "| No running processes found |\n", "+-----------------------------------------------------------------------------+\n" ], "name": "stdout" } ] }, { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "I0Ilk9PubNPb", "outputId": "055a9f9f-bc7d-46b8-cfa7-ba8e4834600e" }, "source": [ "from psutil import virtual_memory\n", "ram_gb = virtual_memory().total / 1e9\n", "print('Your runtime has {:.1f} gigabytes of available RAM\\n'.format(ram_gb))\n", "\n", "if ram_gb < 20:\n", " print('To enable a high-RAM runtime, select the Runtime > \"Change runtime type\"')\n", " print('menu, and then select High-RAM in the Runtime shape dropdown. Then, ')\n", " print('re-execute this cell.')\n", "else:\n", " print('You are using a high-RAM runtime!')\n", " #FOR ram tunning" ], "execution_count": null, "outputs": [ { "output_type": "stream", "text": [ "Your runtime has 27.4 gigabytes of available RAM\n", "\n", "You are using a high-RAM runtime!\n" ], "name": "stdout" } ] }, { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "jEWLICopbNSC", "outputId": "8fbda61b-ad40-449a-841f-2a69e0b70af6" }, "source": [ "from google.colab import drive\n", "drive.mount('/content/drive')\n", "import os\n", "os.chdir(\"/content/drive/My Drive\")\n", "!ls\n", "#For conneting DB" ], "execution_count": null, "outputs": [ { "output_type": "stream", "text": [ "Mounted at /content/drive\n", " Brain_all_downsized2.npy 'H&Y look up table.xlsx'\n", " Brain_Label.npy\t PPMI_FLIRT_BRAIN\n" ], "name": "stdout" } ] }, { "cell_type": "code", "metadata": { "id": "QEQbawx4bNVD" }, "source": [ "from tensorflow import keras\n", "\n", "from keras import Sequential\n", "import matplotlib.pyplot as plt\n", "from keras.layers import Dense, Flatten, Conv3D, Conv2D, MaxPooling3D\n", "from keras.utils import to_categorical\n", "\n", "import numpy as np \n", "# import nessesary libs" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "tPatuD6YInVZ", "outputId": "435a5b5a-506a-42f4-b110-6c550f34559f" }, "source": [ "count = 0\n", "Y_all = np.load('Brain_Label.npy')\n", "Y_all = Y_all[:,1]\n", "for i in range(0,len(Y_all)):\n", " if Y_all[i]>0:\n", " count=count+1\n", "print(count)\n", "# import data and checking" ], "execution_count": null, "outputs": [ { "output_type": "stream", "text": [ "871\n" ], "name": "stdout" } ] }, { "cell_type": "code", "metadata": { "id": "-3cqEIBjbNX6", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "7a692f59-efb2-4633-d195-eb459ae15429" }, "source": [ "\n", "\n", "import keras\n", "from keras import Sequential\n", "import matplotlib.pyplot as plt\n", "from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D\n", "from tensorflow.python.keras.layers.normalization import BatchNormalization\n", "from keras.utils import to_categorical\n", "import numpy as np\n", "\n", "\n", "X_all = np.load('Brain_all_downsized2.npy')\n", "# import data\n", "print(np.shape(X_all))\n", "X_all = np.expand_dims(X_all,axis=4)\n", "Y_all = np.load('Brain_Label.npy')\n", "#import Label \n", "Y_all = Y_all[:,1]\n", "Y_temp = Y_all\n", "Y_all = to_categorical(Y_all)\n", "ti = X_all.shape\n", "# making the necessary adjustments\n", "print(ti)\n", "samples, width, height, depth , _= np.shape(X_all)\n", "X_all = X_all[1:samples,:,:,:]\n", "print(ti)\n", "tx,ty = Y_all.shape\n", "print(tx)\n", "\n", "\n", "\n", "# -- Preparatory code --\n", "# Model configuration\n", "batch_size = 1\n", "no_epochs = 20\n", "no_classes = 2 #for parkinson and control\n", "verbosity = 1\n", "\n", "learning_rate = 0.001\n", " \n", "\n", "\n", "\n", "\n", "\n", "print(\"Started\")\n", "#========================================================================================\n", "# CNN model\n", "\n", "model = Sequential()\n", "\n", "\n", "model.add(Conv3D(filters=64, kernel_size=3, activation=\"relu\",input_shape=(width,height,depth,1)))\n", "model.add(MaxPooling3D(pool_size=2))\n", "model.add(BatchNormalization())\n", "\n", "model.add(Conv3D(filters=64, kernel_size=3, activation=\"relu\"))\n", "model.add(MaxPooling3D(pool_size=2))\n", "model.add(BatchNormalization())\n", "\n", "model.add(Conv3D(filters=64, kernel_size=3, activation=\"relu\"))\n", "model.add(MaxPooling3D(pool_size=2))\n", "model.add(BatchNormalization())\n", "\n", "model.add(Conv3D(filters=64, kernel_size=3, activation=\"relu\"))\n", "model.add(MaxPooling3D(pool_size=2))\n", "model.add(BatchNormalization())\n", "\n", "\n", "# Passing it to a Fully Connected layer\n", "model.add(Flatten())\n", "\n", "\n", "\n", "# 2nd Fully Connected Layer\n", "model.add(Dense(1000))\n", "model.add(Activation('relu'))\n", "# Add Dropout\n", "\n", "\n", "# 3rd Fully Connected Layer\n", "model.add(Dense(1000))\n", "model.add(Activation('relu'))\n", "# Add Dropout\n", "\n", "\n", "\n", "# Output Layer\n", "model.add(Dense(2))\n", "model.add(Activation('softmax'))\n", "\n", "model.summary()\n", "\n" ], "execution_count": null, "outputs": [ { "output_type": "stream", "text": [ "(1131, 91, 109, 91)\n", "(1131, 91, 109, 91, 1)\n", "(1131, 91, 109, 91, 1)\n", "1130\n", "basladı\n", "Model: \"sequential\"\n", "_________________________________________________________________\n", "Layer (type) Output Shape Param # \n", "=================================================================\n", "conv3d (Conv3D) (None, 89, 107, 89, 64) 1792 \n", "_________________________________________________________________\n", "max_pooling3d (MaxPooling3D) (None, 44, 53, 44, 64) 0 \n", "_________________________________________________________________\n", "batch_normalization (BatchNo (None, 44, 53, 44, 64) 256 \n", "_________________________________________________________________\n", "conv3d_1 (Conv3D) (None, 42, 51, 42, 64) 110656 \n", "_________________________________________________________________\n", "max_pooling3d_1 (MaxPooling3 (None, 21, 25, 21, 64) 0 \n", "_________________________________________________________________\n", "batch_normalization_1 (Batch (None, 21, 25, 21, 64) 256 \n", "_________________________________________________________________\n", "conv3d_2 (Conv3D) (None, 19, 23, 19, 64) 110656 \n", "_________________________________________________________________\n", "max_pooling3d_2 (MaxPooling3 (None, 9, 11, 9, 64) 0 \n", "_________________________________________________________________\n", "batch_normalization_2 (Batch (None, 9, 11, 9, 64) 256 \n", "_________________________________________________________________\n", "conv3d_3 (Conv3D) (None, 7, 9, 7, 64) 110656 \n", "_________________________________________________________________\n", "max_pooling3d_3 (MaxPooling3 (None, 3, 4, 3, 64) 0 \n", "_________________________________________________________________\n", "batch_normalization_3 (Batch (None, 3, 4, 3, 64) 256 \n", "_________________________________________________________________\n", "flatten (Flatten) (None, 2304) 0 \n", "_________________________________________________________________\n", "dense (Dense) (None, 1000) 2305000 \n", "_________________________________________________________________\n", "activation (Activation) (None, 1000) 0 \n", "_________________________________________________________________\n", "dense_1 (Dense) (None, 1000) 1001000 \n", "_________________________________________________________________\n", "activation_1 (Activation) (None, 1000) 0 \n", "_________________________________________________________________\n", "dense_2 (Dense) (None, 2) 2002 \n", "_________________________________________________________________\n", "activation_2 (Activation) (None, 2) 0 \n", "=================================================================\n", "Total params: 3,642,786\n", "Trainable params: 3,642,274\n", "Non-trainable params: 512\n", "_________________________________________________________________\n" ], "name": "stdout" } ] }, { "cell_type": "code", "metadata": { "id": "GFcVYeQSbNay" }, "source": [ "from numpy import argmax\n", "model.compile(loss='mse', optimizer='adam')\n", "print(\"model compiled\")\n", "\n", "from sklearn import metrics\n", "from sklearn.model_selection import KFold \n", "\n", "tx,*_=np.shape(X_all)\n", "Y_Pred_all = np.zeros(tx)\n", "seed = 7\n", "fold=10\n", "np.random.seed(seed)\n", "kfold = KFold(n_splits=fold, shuffle = True, random_state = seed)\n", "#Arrengement for K fold\n", "batch_size = 4\n", "no_epochs = 30\n", "no_classes = 2 #for parkinson and control\n", "verbosity = 1\n", "learning_rate = 0.0001\n", "\n", "count = 1\n", "for train, test in kfold.split(X_all,Y_all):\n", " print(np.shape(X_all[train]))\n", " print(np.shape(Y_all[train]))\n", " print(count,\". Fold\")\n", " count +=1\n", " history = model.fit(X_all[train], Y_all[train],\n", " batch_size=batch_size,\n", " epochs=no_epochs,\n", " verbose=verbosity)\n", " Y_Pred = model.predict(X_all[test])\n", " #model predicting\n", " \n", " Y_Pred = argmax(Y_Pred, axis=1)\n", " j = 0\n", " for i in test:\n", " Y_Pred_all[i] = Y_Pred[j]\n", " j+=1\n" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "G2nyauoobNdr" }, "source": [ "Y_True = Y_temp\n", "\n", "#for evaluating\n", "import sklearn.metrics as metrics\n", "acc = metrics.accuracy_score(Y_True, Y_Pred_all, normalize=True)\n", "f1 = metrics.f1_score(Y_True,Y_Pred_all, average=None)\n", "roc_auc = metrics.roc_auc_score(Y_True,Y_Pred_all)\n", "precision = metrics.precision_score(Y_True,Y_Pred_all, average=None)\n", "recall = metrics.recall_score(Y_True,Y_Pred_all, average=None)\n", "confusion = metrics.confusion_matrix(Y_True,Y_Pred_all)\n", "\n", "print(\"Accuracy: \"+str(acc))\n", "print(\"Roc_auc: \"+str(roc_auc))\n", "print(\"F1: \"+str(f1))\n", "print(\"Precision: \"+str(precision))\n", "print(\"Recall: \"+str(recall))\n", "print(\"Confusion: \"+str(confusion))" ], "execution_count": null, "outputs": [] } ] }