{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Ok8W5Ll1MJix", "outputId": "9987d2d1-81c4-45ad-e60a-9bf3f8c2adb5" }, "outputs": [], "source": [ "# Import TensorFlow\n", "import tensorflow as tf\n", "\n", "# Check the TensorFlow version and list the GPUs it can see\n", "print(tf.__version__)\n", "print(tf.config.list_physical_devices('GPU'))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "lNjIhf3VSoRk", "outputId": "39d4abd3-441f-435b-d22f-8b316f56b0ae" }, "outputs": [], "source": [ "from platform import python_version\n", "print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "qaD2pvRm4FP2" }, "outputs": [], "source": [ "# Imports\n", "import os\n", "import sys\n", "import inspect\n", "import numpy as np\n", "import tensorflow as tf\n", "import matplotlib.pyplot as plt\n", "import matplotlib as mat\n", "# NOTE: do not alias keras as K here -- a later cell rebinds K to keras.backend\n", "import keras\n", "import sklearn as sk\n", "import seaborn as sns\n", "import pickle\n", "import shutil\n", "import cv2" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "1LSdJEoJNukz", "outputId": "229fc025-affe-46b2-ee6a-cbe73545f6db" }, "outputs": [], "source": [ "%matplotlib inline\n", "from PIL import Image, ImageOps, ImageEnhance, ImageFile\n", "from numpy import random\n", "from IPython.display import SVG\n", "# NOTE(review): distutils is removed from Python 3.12+; acceptable on this notebook's Python 3.10\n", "from distutils.version import LooseVersion as LV\n", "from sklearn.model_selection import train_test_split\n", "from shutil import copyfile\n", "from datetime import datetime\n", "from tensorflow.python.framework import ops\n", "from sklearn.metrics import accuracy_score\n", "from sklearn.metrics import precision_recall_fscore_support, confusion_matrix, classification_report, roc_curve, roc_auc_score" ]
}, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Co759UtnO8SS", "outputId": "2173d9bc-1dd9-469c-f8fb-64d3859c3e8f" }, "outputs": [], "source": [ "from keras.models import Sequential,model_from_json\n", "from keras.preprocessing import image\n", "from keras.preprocessing.image import ImageDataGenerator\n", "# Conv2D comes from keras.layers; the legacy keras.layers.convolutional path was removed in newer Keras\n", "from keras.layers import Dense, Activation, Dropout, Flatten, MaxPooling2D,Conv2D\n", "from keras import backend as K\n", "from keras import __version__\n", "print('Using Keras version:', __version__, 'backend:', K.backend())\n", "assert(LV(__version__) >= LV(\"2.0.0\"))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "D9Y46EWwQdxG", "outputId": "446c0c0c-b682-49c9-f39c-f4713fc868ec" }, "outputs": [], "source": [ "# Dataset root and the train/validation/test split directories\n", "base_dir = \"D:\\\\FOTOSDOC\\\\dataset\"\n", "train_dir = os.path.join(base_dir, 'train_set')\n", "validation_dir = os.path.join(base_dir, 'validation_set')\n", "test_dir = os.path.join(base_dir, 'test_set')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "train_temp= \"D:\\\\FOTOSDOC\\\\train_temp\" # temporary directory" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def suavizar(image):\n", " \"\"\"Smooth an image with a 5x5 Gaussian blur (used as generator preprocessing).\"\"\"\n", " suavizada = cv2.GaussianBlur(image, (5, 5), 0)\n", " return suavizada\n", "\n", "# Training generator: Gaussian smoothing + shear/zoom/flip augmentation + 1/255 rescale\n", "train_datagen = ImageDataGenerator(\n", " preprocessing_function=suavizar, shear_range=0.2,zoom_range=0.2, horizontal_flip=True, rescale=1./255\n", ")\n", "\n", "# Validation/test only get the 1/255 rescale (no augmentation)\n", "test_datagen = 
ImageDataGenerator(rescale=1./255)\n", "validation_datagen = ImageDataGenerator(rescale=1./255)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "jVwWvPv5QUak" }, "outputs": [], "source": [ "batch_size=6\n", "# Use train_dir defined earlier instead of duplicating the hardcoded path\n", "train_generator = train_datagen.flow_from_directory(\n", " train_dir,\n", " target_size=(128,128), # size image\n", " batch_size = batch_size, shuffle = True, class_mode='binary', classes=['damage', 'intact'] ) # labels binary" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "pPOPnMbBQVzp" }, "outputs": [], "source": [ "validation_generator = validation_datagen.flow_from_directory(\n", " validation_dir,\n", " target_size=(128,128), batch_size = 1, class_mode='binary', classes=['damage', 'intact'] )" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "lt06h6e15S3c" }, "outputs": [], "source": [ "# classes is given explicitly so the class->index mapping matches train/validation\n", "test_generator = test_datagen.flow_from_directory(\n", " test_dir,\n", " target_size=(128,128), batch_size = 1, class_mode='binary', classes=['damage', 'intact'], shuffle=False \n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "pDU_ci7vb-qa" }, "outputs": [], "source": [ "model = Sequential()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "-VOmaY6hbSxO" }, "outputs": [], "source": [ "# First convolution + maxpooling block - LAYER 0\n", "model.add(Conv2D(32,(3,3), input_shape=(128,128,3)))\n", "model.add(Activation('relu'))\n", "model.add(MaxPooling2D(pool_size=(2, 2)))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "QCvUfHQVcHGv" }, "outputs": [], "source": [ "# Second convolution + maxpooling block - LAYER 1\n", "model.add(Conv2D(32,(3,3)))\n", "model.add(Activation('relu'))\n", 
"model.add(MaxPooling2D(pool_size=(2, 2)))\n", "model.add(Dropout(0.25))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ZbI34gL1cJTw" }, "outputs": [], "source": [ "# Third convolution + maxpooling block - LAYER 2\n", "model.add(Conv2D(32, (3,3)))\n", "model.add(Activation('relu'))\n", "model.add(MaxPooling2D(pool_size=(2, 2)))\n", "model.add(Dropout(0.25))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "-PT_dy2zdP5E" }, "outputs": [], "source": [ "# Flatten the feature maps into a vector for the dense head\n", "model.add(Flatten())" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ywcEwedVdgmM" }, "outputs": [], "source": [ "# Dense head: two hidden layers and one sigmoid unit for binary output\n", "model.add(Dense(units = 128, activation = 'relu')) # adjust the number of neurons here\n", "model.add(Dense(units = 128, activation = 'relu')) # adjust the number of neurons here\n", "model.add(Dense(units = 1, activation = 'sigmoid'))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "CSZKxcLBO9U8" }, "outputs": [], "source": [ "# metrics is a list, consistent with the evaluation compile cell below\n", "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "num_epochs = 5 # number of consecutive training runs\n", "\n", "history_list = []\n", "accuracy_list = []\n", "loss_list = []\n", "val_accuracy_list = []\n", "val_loss_list = []\n", "# NOTE(review): each iteration CONTINUES training the same model for another 200 epochs\n", "# (the model is not re-initialized); the per-run histories are collected for averaging later\n", "for epoch in range(num_epochs):\n", " history = model.fit(train_generator, epochs=200, validation_data=validation_generator)\n", " \n", " accuracy_list.append(history.history['accuracy'])\n", " loss_list.append(history.history['loss'])\n", " val_accuracy_list.append(history.history['val_accuracy'])\n", " val_loss_list.append(history.history['val_loss'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Load the architecture from model.json and rebuild the model\n", "json_file = open('model.json','r')\n", "loaded_model_json = json_file.read()\n", 
"json_file.close()\n", "loaded_model = model_from_json(loaded_model_json)\n", "\n", "# Load the trained weights into the rebuilt model.\n", "# BUG FIX: load_weights loads in place and returns None, so the old\n", "# `model = loaded_model.load_weights(...)` clobbered `model` with None.\n", "loaded_model.load_weights('model.h5')\n", "print('Loaded model from disk')\n", "\n", "\n", "np.random.seed(0)\n", "tf.random.set_seed(0)\n", "\n", "# Evaluate the reloaded model on the test set\n", "loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n", "\n", "\n", "score = loaded_model.evaluate(test_generator)\n", "print(\"{}: {}\".format(loaded_model.metrics_names[1],score[1]*100))\n", "# BUG FIX: loss is not a percentage, report it unscaled\n", "print(\"{}: {}\".format(loaded_model.metrics_names[0],score[0]))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "loaded_model.summary()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Predicted sigmoid scores, shape (num_samples, 1)\n", "predictions = loaded_model.predict(test_generator)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "threshold=0.5\n", "prediction_percent=(predictions*100).round(4)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "predicted_labels = (predictions > threshold).astype(int) " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "true_labels = test_generator.classes # ground-truth class indices (0='damage', 1='intact')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class_names = list(test_generator.class_indices.keys())\n", "print(class_names)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Compute the confusion matrix\n", "confusion = confusion_matrix(true_labels, predicted_labels)\n", "\n", "# Row-normalize so each true class sums to 1\n", "confusion = confusion.astype('float') / confusion.sum(axis=1)[:, np.newaxis]\n", "\n", "# Crie o heatmap da matriz de 
confusão\n", "plt.figure(figsize=(4, 3))\n", "sns.heatmap(confusion, annot=True, fmt=\".2f\", cmap=\"Blues\", cbar=False)\n", "plt.xlabel(\"Predicted\")\n", "plt.ylabel(\"True\")\n", "plt.title(\"Confusion Matrix\")\n", "plt.xticks(np.arange(len(class_names)) + 0.5, class_names)\n", "plt.yticks(np.arange(len(class_names)) + 0.5, class_names)\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# BUG FIX: the dangling 'from sklearn.metrics import' line was a SyntaxError;\n", "# roc_auc_score is already imported at the top of the notebook.\n", "# ROC-AUC must be computed from the probability scores, not the thresholded labels.\n", "roc_auc = roc_auc_score(true_labels, predictions.ravel())\n", "print(f'ROC-AUC: {roc_auc:.2f}')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [ "# ROC curve -- built from the continuous scores; using thresholded labels would\n", "# collapse the curve to a single operating point\n", "fpr, tpr, thresholds = roc_curve(true_labels, predictions.ravel())\n", "roc_auc = roc_auc_score(true_labels, predictions.ravel()) # ROC-AUC\n", "\n", "plt.figure(figsize=(4, 3))\n", "plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC-AUC = {roc_auc:.2f}')\n", "plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n", "plt.xlim([0.0, 1.0])\n", "plt.ylim([0.0, 1.05])\n", "plt.xlabel('False Positive Rate')\n", "plt.ylabel('True Positive Rate')\n", "plt.title('ROC curve')\n", "plt.legend(loc='lower right')\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig, axes = plt.subplots(3, 5, figsize=(12, 6))\n", "axes = axes.ravel()\n", "# NOTE(review): predictions has shape (N, 1) (single sigmoid unit), so this argmax is\n", "# always 0; the plot titles below are actually decided from `confidence` (the sigmoid score)\n", "predicted_classes = np.argmax(predictions, axis=1)\n", "class_labels = list(test_generator.class_indices.keys())\n", "\n", "for i in range(15):\n", " index = 40 + i # Starting from position\n", " axes[i].imshow(test_generator[index][0][0])\n", " predicted_label = class_labels[predicted_classes[index]]\n", " true_label = class_labels[true_labels[index]]\n", " confidence = predictions[index][predicted_classes[index]] * 100\n", " if confidence < (threshold*100):\n", " axes[i].set_title(\"Predicted: {} \\nConfidence: ({:.2f}%) \\nTrue: 
{}\".format(\"damage\", confidence, true_label), fontsize=8)\n", " axes[i].axis('off')\n", " else:\n", " axes[i].set_title(\"Predicted: {} \\nConfidence: ({:.2f}%) \\nTrue: {}\".format(\"intact\", confidence, true_label), fontsize=8)\n", " axes[i].axis('off')\n", "\n", "plt.tight_layout()\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Plot the per-run training curves: accuracy\n", "for i in range(len(accuracy_list)):\n", " plt.plot(accuracy_list[i], label=f'Treino {i+1}')\n", "plt.title('Curvas de treinamento para accuracy')\n", "plt.ylabel('Accuracy')\n", "plt.xlabel('Epoch')\n", "plt.legend()\n", "plt.show()\n", "\n", "# Training loss per run\n", "for i in range(len(loss_list)):\n", " plt.plot(loss_list[i], label=f'Treino {i+1}')\n", "plt.title('Curvas de treinamento para loss')\n", "plt.ylabel('Loss')\n", "plt.xlabel('Epoch')\n", "plt.legend()\n", "plt.show()\n", "\n", "\n", "# Validation accuracy per run\n", "for i in range(len(val_accuracy_list)):\n", " plt.plot(val_accuracy_list[i], label=f'Treino {i+1}')\n", "plt.title('Curvas de treinamento para val_accuracy')\n", "plt.ylabel('Val_Accuracy')\n", "plt.xlabel('Epoch')\n", "plt.legend()\n", "plt.show()\n", "\n", "# Validation loss per run\n", "for i in range(len(val_loss_list)):\n", " plt.plot(val_loss_list[i], label=f'Treino {i+1}')\n", "plt.title('Curvas de treinamento para val_loss')\n", "plt.ylabel('Val_Loss')\n", "plt.xlabel('Epoch')\n", "plt.legend()\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Average the per-run history curves element-wise across the runs\n", "accuracy_mean = np.mean(accuracy_list, axis=0, dtype=object)\n", "val_accuracy_mean = np.mean(val_accuracy_list, axis=0, dtype=object)\n", "\n", "loss_mean = np.mean(loss_list, axis=0, dtype=object)\n", "val_loss_mean = np.mean(val_loss_list, axis=0, dtype=object)\n", "\n", "plt.plot(accuracy_mean * 100, label='Training', 
linestyle='--')\n", "plt.plot(val_accuracy_mean * 100, label='Validation')\n", "plt.ylabel('Accuracy (%)')\n", "plt.xlabel('Epoch')\n", "plt.legend()\n", "plt.show()\n", "\n", "# BUG FIX: loss is not a percentage -- plot it unscaled and label it accordingly\n", "plt.plot(loss_mean, label='Training', linestyle='--')\n", "plt.plot(val_loss_mean, label='Validation')\n", "plt.ylabel('Loss')\n", "plt.xlabel('Epoch')\n", "plt.legend()\n", "plt.show()\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# use the %pip magic so the package is installed into the running kernel's environment\n", "%pip install scikit-image" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "5qi2N-ApEBeE" }, "outputs": [], "source": [ "# Visualize the first-layer convolution filters (normalized to [0, 1])\n", "loaded_model.load_weights('model.h5')\n", "# use loaded_model consistently; `model` may not hold the reloaded weights here\n", "filters = loaded_model.layers[0].get_weights()[0]\n", "filters_norm= (filters - np.min(filters)) / (np.max(filters) - np.min(filters))\n", "fig, axes = plt.subplots(4, 4, figsize=(6, 6))\n", "for i, ax in enumerate(axes.flatten()):\n", " ax.imshow(np.squeeze(filters_norm[:, :, :, i]), cmap='gray')\n", " ax.set_yticks([])\n", " ax.set_xticks([])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# First-layer filters averaged over the input channels (grayscale view)\n", "\n", "filters = loaded_model.layers[0].get_weights()[0]\n", "\n", "filters_gray = np.mean(filters, axis=2)\n", "\n", "filters_norm = (filters_gray - np.min(filters_gray)) / (np.max(filters_gray) - np.min(filters_gray))\n", "\n", "fig, axes = plt.subplots(4, 4, figsize=(6, 6))\n", "# BUG FIX: subplots_adjust must run after this figure is created\n", "# (it previously targeted the figure left over from the preceding cell)\n", "fig.subplots_adjust(hspace=0.1, wspace=0.1)\n", "for i, ax in enumerate(axes.flatten()):\n", " ax.imshow(filters_norm[:, :, i], cmap='gray')\n", " ax.set_yticks([])\n", " ax.set_xticks([])\n", "\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Grayscale feature maps: build a model exposing every layer's activations\n", "inp = loaded_model.layers[0].input\n", "outs = [layer.output for layer in loaded_model.layers]\n", "\n", "layerized_model = tf.keras.models.Model(inp, outs)\n", "Omat = 
layerized_model.predict(test_generator)\n", "\n", "# Feature maps of the first two layers for the first test image\n", "fig, axes = plt.subplots(4, 4, figsize=(4, 4))\n", "\n", "for i, ax in enumerate(axes.flatten()):\n", " ax.imshow(np.squeeze(Omat[0][0, :, :, i]), cmap='gray')\n", " ax.set_yticks([])\n", " ax.set_xticks([])\n", "\n", "fig, axes = plt.subplots(4, 4, figsize=(4, 4))\n", "\n", "for i, ax in enumerate(axes.flatten()):\n", " ax.imshow(np.squeeze(Omat[1][0, :, :, i]), cmap='gray')\n", " ax.set_yticks([])\n", " ax.set_xticks([])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Plot predictions (labels in Portuguese)\n", "fig, axes = plt.subplots(6, 5, figsize=(8, 8))\n", "axes = axes.ravel()\n", "\n", "for i in range(30):\n", " axes[i].imshow(test_generator[i][0][0])\n", " # BUG FIX: the ground-truth array is true_labels (true_classes was undefined)\n", " true_label = class_labels[true_labels[i]]\n", " confidence = predictions[i][0] * 100\n", " # BUG FIX: confidence is the sigmoid score for class 1 ('intact'), so a LOW value\n", " # means 'damage' -- the labels were swapped relative to the earlier plotting cell\n", " if confidence<50:\n", " axes[i].set_title(\"Predição: {} \\nConfiança: ({:.2f}%)\".format(\"Dano\", confidence), fontsize=8)\n", " axes[i].axis('off')\n", " else:\n", " axes[i].set_title(\"Predição: {} \\nConfiança: ({:.2f}%)\".format(\"Intacto\", confidence), fontsize=8)\n", " axes[i].axis('off')\n", "plt.tight_layout()\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Plot predictions (labels in English)\n", "fig, axes = plt.subplots(6, 5, figsize=(8, 8))\n", "axes = axes.ravel()\n", "\n", "for i in range(30):\n", " axes[i].imshow(test_generator[i][0][0])\n", " # BUG FIX: true_classes was undefined; the array is true_labels\n", " true_label = class_labels[true_labels[i]]\n", " confidence = predictions[i][0] * 100\n", " # BUG FIX: low sigmoid score means 'damage' (class 0); labels were swapped,\n", " # and the titles of this English cell now actually use English words\n", " if confidence<50:\n", " axes[i].set_title(\"Predicted: {} \\nConfidence: ({:.2f}%)\".format(\"Damage\", confidence), fontsize=8)\n", " axes[i].axis('off')\n", " else:\n", " axes[i].set_title(\"Predicted: {} \\nConfidence: ({:.2f}%)\".format(\"Intact\", confidence), fontsize=8)\n", " axes[i].axis('off')\n", "plt.tight_layout()\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Illustrate the augmentation transforms on one test image\n", "img_path = os.path.join(test_dir, 'damage', '20220417_121557.jpg')\n", "img = Image.open(img_path)\n", "\n", "# NOTE(review): Image.FLIP_LEFT_RIGHT is deprecated in newer Pillow;\n", "# Image.Transpose.FLIP_LEFT_RIGHT is the modern spelling -- confirm before upgrading\n", "img_flipped = img.transpose(method=Image.FLIP_LEFT_RIGHT)\n", "img_rotated = img.rotate(45)\n", "\n", "width, height = img.size\n", "new_width, new_height = int(width * 0.6), int(height * 0.4)\n", "\n", "# Resize (rescale)\n", "img_rescaled = img.resize((new_width, new_height))\n", "\n", "# Figure with four panels\n", "fig, axes = plt.subplots(nrows=2, ncols=2)\n", "\n", "# Original image\n", "axes[0,0].imshow(np.asarray(img))\n", "axes[0,0].set_title(\"Original\", fontsize=8)\n", "\n", "# Horizontally flipped\n", "axes[0,1].imshow(np.asarray(img_flipped))\n", "axes[0,1].set_title(\"Flip\", fontsize=8)\n", "\n", "# Rotated\n", "axes[1,0].imshow(np.asarray(img_rotated))\n", "axes[1,0].set_title(\"Rotation\", fontsize=8)\n", "\n", "# Rescaled\n", "axes[1,1].imshow(np.asarray(img_rescaled))\n", "axes[1,1].set_title(\"Rescale\", fontsize=8)\n", "\n", "plt.subplots_adjust(wspace=0.1, hspace=0.05)\n", "# Remove tick marks from all subplots\n", "for ax in axes.flat:\n", " ax.set_xticks([])\n", " ax.set_yticks([])\n", "\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Show the first four 'damage' test images as thumbnails\n", "path = os.path.join(test_dir, 'damage')\n", "fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(4, 3))\n", "ax = ax.flatten()\n", "for i, img in enumerate(os.listdir(path)[:4]):\n", " img_path = os.path.join(path, img)\n", " if os.path.isfile(img_path):\n", " image = Image.open(img_path)\n", " image.thumbnail((100, 100))\n", " 
ax[i].imshow(image)\n", "plt.tight_layout()\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Show the first four 'intact' test images as thumbnails\n", "path = os.path.join(test_dir, 'intact')\n", "fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(4, 3))\n", "ax = ax.flatten()\n", "for i, img in enumerate(os.listdir(path)[:4]):\n", " img_path = os.path.join(path, img)\n", " if os.path.isfile(img_path):\n", " image = Image.open(img_path)\n", " image.thumbnail((100, 100))\n", " ax[i].imshow(image)\n", "plt.tight_layout()\n", "plt.show()" ] } ], "metadata": { "accelerator": "GPU", "colab": { "provenance": [] }, "gpuClass": "standard", "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.9" } }, "nbformat": 4, "nbformat_minor": 1 }