{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "RSxxofR3yReM" }, "source": [ "# 1. Import Dependencies" ] }, { "cell_type": "code", "execution_count": 153, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "HCM8-I3eybBn", "outputId": "2eab7620-04bf-45e6-d225-84b8a84050c0" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Collecting gymnasium==0.28.1\n", " Using cached gymnasium-0.28.1-py3-none-any.whl.metadata (9.2 kB)\n", "Requirement already satisfied: numpy>=1.21.0 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium==0.28.1) (1.26.3)\n", "Collecting jax-jumpy>=1.0.0 (from gymnasium==0.28.1)\n", " Using cached jax_jumpy-1.0.0-py3-none-any.whl.metadata (15 kB)\n", "Requirement already satisfied: cloudpickle>=1.2.0 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium==0.28.1) (3.0.0)\n", "Requirement already satisfied: typing-extensions>=4.3.0 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium==0.28.1) (4.9.0)\n", "Requirement already satisfied: farama-notifications>=0.0.1 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium==0.28.1) (0.0.4)\n", "Using cached gymnasium-0.28.1-py3-none-any.whl (925 kB)\n", "Using cached jax_jumpy-1.0.0-py3-none-any.whl (20 kB)\n", "Installing collected packages: jax-jumpy, gymnasium\n", " Attempting uninstall: gymnasium\n", " Found existing installation: gymnasium 0.29.1\n", " Uninstalling gymnasium-0.29.1:\n", " Successfully uninstalled gymnasium-0.29.1\n", "Successfully installed gymnasium-0.28.1 jax-jumpy-1.0.0\n", "\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.3.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.2\u001b[0m\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython3.11 -m pip install --upgrade pip\u001b[0m\n", "Requirement already satisfied: shimmy>=0.2.1 in /opt/homebrew/lib/python3.11/site-packages (1.3.0)\n", "Requirement already satisfied: numpy>=1.18.0 in /opt/homebrew/lib/python3.11/site-packages (from shimmy>=0.2.1) (1.26.3)\n", "Requirement already satisfied: gymnasium>=0.27.0 in /opt/homebrew/lib/python3.11/site-packages (from shimmy>=0.2.1) (0.28.1)\n", "Requirement already satisfied: jax-jumpy>=1.0.0 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium>=0.27.0->shimmy>=0.2.1) (1.0.0)\n", "Requirement already satisfied: cloudpickle>=1.2.0 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium>=0.27.0->shimmy>=0.2.1) (3.0.0)\n", "Requirement already satisfied: typing-extensions>=4.3.0 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium>=0.27.0->shimmy>=0.2.1) (4.9.0)\n", "Requirement already satisfied: farama-notifications>=0.0.1 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium>=0.27.0->shimmy>=0.2.1) (0.0.4)\n", "\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.3.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.2\u001b[0m\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython3.11 -m pip install --upgrade pip\u001b[0m\n", "Requirement already satisfied: stable-baselines3 in /opt/homebrew/lib/python3.11/site-packages (2.2.1)\n", "Requirement already satisfied: gymnasium<0.30,>=0.28.1 in /opt/homebrew/lib/python3.11/site-packages 
(from stable-baselines3) (0.28.1)\n", "Requirement already satisfied: numpy>=1.20 in /opt/homebrew/lib/python3.11/site-packages (from stable-baselines3) (1.26.3)\n", "Requirement already satisfied: torch>=1.13 in /opt/homebrew/lib/python3.11/site-packages (from stable-baselines3) (2.1.2)\n", "Requirement already satisfied: cloudpickle in /opt/homebrew/lib/python3.11/site-packages (from stable-baselines3) (3.0.0)\n", "Requirement already satisfied: pandas in /opt/homebrew/lib/python3.11/site-packages (from stable-baselines3) (2.1.4)\n", "Requirement already satisfied: matplotlib in /opt/homebrew/lib/python3.11/site-packages (from stable-baselines3) (3.8.2)\n", "Requirement already satisfied: jax-jumpy>=1.0.0 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium<0.30,>=0.28.1->stable-baselines3) (1.0.0)\n", "Requirement already satisfied: typing-extensions>=4.3.0 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium<0.30,>=0.28.1->stable-baselines3) (4.9.0)\n", "Requirement already satisfied: farama-notifications>=0.0.1 in /opt/homebrew/lib/python3.11/site-packages (from gymnasium<0.30,>=0.28.1->stable-baselines3) (0.0.4)\n", "Requirement already satisfied: filelock in /opt/homebrew/lib/python3.11/site-packages (from torch>=1.13->stable-baselines3) (3.13.1)\n", "Requirement already satisfied: sympy in /opt/homebrew/lib/python3.11/site-packages (from torch>=1.13->stable-baselines3) (1.12)\n", "Requirement already satisfied: networkx in /opt/homebrew/lib/python3.11/site-packages (from torch>=1.13->stable-baselines3) (3.2.1)\n", "Requirement already satisfied: jinja2 in /opt/homebrew/lib/python3.11/site-packages (from torch>=1.13->stable-baselines3) (3.1.3)\n", "Requirement already satisfied: fsspec in /opt/homebrew/lib/python3.11/site-packages (from torch>=1.13->stable-baselines3) (2023.12.2)\n", "Requirement already satisfied: contourpy>=1.0.1 in /opt/homebrew/lib/python3.11/site-packages (from matplotlib->stable-baselines3) (1.2.0)\n", "Requirement already satisfied: cycler>=0.10 in /opt/homebrew/lib/python3.11/site-packages (from matplotlib->stable-baselines3) (0.12.1)\n", "Requirement already satisfied: fonttools>=4.22.0 in /opt/homebrew/lib/python3.11/site-packages (from matplotlib->stable-baselines3) (4.47.2)\n", "Requirement already satisfied: kiwisolver>=1.3.1 in /opt/homebrew/lib/python3.11/site-packages (from matplotlib->stable-baselines3) (1.4.5)\n", "Requirement already satisfied: packaging>=20.0 in /opt/homebrew/lib/python3.11/site-packages (from matplotlib->stable-baselines3) (23.2)\n", "Requirement already satisfied: pillow>=8 in /opt/homebrew/lib/python3.11/site-packages (from matplotlib->stable-baselines3) (10.2.0)\n", "Requirement already satisfied: pyparsing>=2.3.1 in /opt/homebrew/lib/python3.11/site-packages (from matplotlib->stable-baselines3) (3.1.1)\n", "Requirement already satisfied: python-dateutil>=2.7 in /opt/homebrew/lib/python3.11/site-packages (from matplotlib->stable-baselines3) (2.8.2)\n", "Requirement already satisfied: pytz>=2020.1 in /opt/homebrew/lib/python3.11/site-packages (from pandas->stable-baselines3) (2023.3.post1)\n", "Requirement already satisfied: tzdata>=2022.1 in /opt/homebrew/lib/python3.11/site-packages (from pandas->stable-baselines3) (2023.4)\n", "Requirement already satisfied: six>=1.5 in /opt/homebrew/lib/python3.11/site-packages (from python-dateutil>=2.7->matplotlib->stable-baselines3) (1.16.0)\n", "Requirement already satisfied: MarkupSafe>=2.0 in /opt/homebrew/lib/python3.11/site-packages (from 
jinja2->torch>=1.13->stable-baselines3) (2.1.4)\n", "Requirement already satisfied: mpmath>=0.19 in /opt/homebrew/lib/python3.11/site-packages (from sympy->torch>=1.13->stable-baselines3) (1.3.0)\n", "\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.3.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.2\u001b[0m\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython3.11 -m pip install --upgrade pip\u001b[0m\n" ] } ], "source": [ "!pip install gymnasium==0.28.1\n", "!pip install 'shimmy>=0.2.1'\n", "!pip install stable-baselines3" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "id": "rqk5d-ctyReM" }, "outputs": [], "source": [ "import gym\n", "import numpy as np\n", "import random\n", "import os\n", "import matplotlib.pyplot as plt\n", "import pandas as pd\n", "from gym import Env\n", "from gym.spaces import Discrete, Box, Dict, Tuple, MultiBinary, MultiDiscrete\n", "from stable_baselines3 import PPO\n", "from stable_baselines3.common.vec_env import VecFrameStack\n", "from stable_baselines3.common.evaluation import evaluate_policy\n", "from collections import deque\n", "from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, accuracy_score\n", "from sklearn.neighbors import KNeighborsClassifier\n", "from sklearn.model_selection import train_test_split" ] }, { "cell_type": "markdown", "metadata": { "id": "tyXhlzHayReO" }, "source": [ "# 2. Building Replay Buffer & Environment" ] }, { "cell_type": "code", "execution_count": 317, "metadata": { "id": "GO67SsZObvqu" }, "outputs": [], "source": [ "class ReplayBuffer:\n", " def __init__(self, buffer_size=2000):\n", " self.buffer = deque(maxlen=buffer_size)\n", " self.negative_experiences = set()\n", " self.positive_experiences = set()\n", "\n", " def add(self, experience):\n", " state, action, reward, next_state, done = experience\n", " state_tuple = tuple(state)\n", " next_state_tuple = tuple(next_state)\n", " if reward <= 0:\n", " self.negative_experiences.add((state_tuple, action))\n", " elif reward == 4:\n", " self.positive_experiences.add((state_tuple, action))\n", " #self.buffer.append(experience)\n", " self.buffer.append((state_tuple, action, reward, next_state_tuple, done))\n", "\n", " # Print for debugging\n", " # print(f\"Added experience: {experience}\")\n", "\n", " def sample(self, batch_size):\n", " sample = random.sample(self.buffer, batch_size)\n", "\n", " # Print for debugging\n", " #print(f\"Sampled batch: {sample}\")\n", "\n", " return sample\n", "\n", " def __len__(self):\n", " return len(self.buffer)\n" ] }, { "cell_type": "code", "execution_count": 318, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "RmAxgtE3bxIv", "outputId": "70c1dece-61cf-4606-f164-2f4b62bedb38" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "All tests passed!\n" ] } ], "source": [ "def test_replay_buffer():\n", " buffer = ReplayBuffer(buffer_size=10)\n", "\n", " # Add some experiences\n", " for i in range(5):\n", " state = tuple(np.array([i, i+1, i+2, i+3]))\n", " action = i % 3\n", " reward = i\n", " next_state = tuple(np.array(state) + 1)\n", " done = i == 3\n", " buffer.add((state, action, reward, next_state, done))\n", "\n", " # Check buffer length\n", " assert len(buffer) == 5, \"Buffer length should be 5\"\n", "\n", " # Sample from buffer\n", " batch = 
buffer.sample(3)\n", " assert len(batch) == 3, \"Sampled batch size should be 3\"\n", "\n", " # Check content of buffer\n", " for experience in batch:\n", " state, action, reward, next_state, done = experience\n", " assert isinstance(state, tuple), \"State should be a tuple\"\n", " assert isinstance(next_state, tuple), \"Next state should be a tuple\"\n", " assert isinstance(action, int), \"Action should be an int\"\n", " assert isinstance(reward, int), \"Reward should be an int\"\n", " assert isinstance(done, bool), \"Done should be a bool\"\n", "\n", " print(\"All tests passed!\")\n", "\n", "# Run the test\n", "test_replay_buffer()\n" ] }, { "cell_type": "code", "execution_count": 854, "metadata": { "id": "zeTEPw3yyReO" }, "outputs": [], "source": [ "class PouroverEnv(Env):\n", " def __init__(self):\n", " self.action_space = Discrete(3)\n", "\n", " # Combine all observation spaces into a single Box space\n", " # Observation vector: [grind size, brew ratio, brew time (seconds), temperature]\n", " # 1:extra fine, 2:fine, 3:fine-medium, 4:medium, 5:medium-coarse, 6:coarse, 7:extra coarse\n", " low = np.array([1, 10, 120, 84], dtype=np.float32)\n", " high = np.array([7, 20, 300, 94], dtype=np.float32) # 5 minutes * 60 seconds = 300 seconds\n", " self.observation_space = Box(low, high, dtype=np.float32)\n", "\n", " self.reset()\n", "\n", " def step(self, action):\n", "\n", " self.state[0] += action - 1\n", " self.state[1] += action - 1\n", " self.state[2] += (action - 1) * 5 # 5 seconds for each action\n", " self.state[3] += action - 1\n", "\n", "\n", " # Apply absolute value\n", " self.state = np.abs(self.state)\n", "\n", " # Clip the state values to their respective bounds\n", " self.state[0] = np.clip(self.state[0], 3, 5)\n", " #self.state[1] = np.clip(self.state[1], 12, 18)\n", " self.state[2] = int(np.round(np.clip(self.state[2], 120, 240)/5) * 5)\n", " #self.state[3] = np.clip(self.state[3], 86, 92)\n", "\n", " self.gsize_length -= 1\n", " self.bratio_length -= 1\n", " self.btime_length -= 1\n", " self.temperature_length -= 1\n", "\n", " reward = 0\n", " gsize_state, bratio_state, btime_state, temperature_state = self.state\n", " # levelroast_length, gsize_length, bratio_length, btime_length, temperature_length = self.length\n", "\n", "\n", " if gsize_state == 3:\n", " reward += 1\n", " if 12 <= bratio_state <= 14:\n", " reward += 1\n", " if 120 <= btime_state <= 180:\n", " reward += 1\n", " if 86 <= temperature_state <= 88:\n", " reward += 1\n", " elif gsize_state == 4:\n", " reward += 1\n", " if 14 <= bratio_state <= 16:\n", " reward += 1\n", " if 150 <= btime_state <= 180:\n", " reward += 1\n", " if 88 <= temperature_state <= 90:\n", " reward += 1\n", " elif gsize_state == 5:\n", " reward += 1\n", " if 16 <= bratio_state <= 18:\n", " reward += 1\n", " if 210 <= btime_state <= 240:\n", " reward += 1\n", " if 90 <= temperature_state <= 92:\n", " reward += 1\n", "\n", " #done = np.all(self.state == np.array([1, 2, 12, 1, 80], dtype=np.float32))\n", " done = self.gsize_length <= 0 and self.bratio_length <= 0 and self.btime_length <= 0 and self.temperature_length <= 0\n", "\n", " info = {}\n", " state = (self.state[0], self.state[1], self.state[2], self.state[3])\n", " return self.state, reward, done, info\n", "\n", " def get_evaluation_action(self, state):\n", " # Rule-based reference action: 0: decrease, 1: keep, 2: increase\n", " gsize_state, bratio_state, btime_state, temperature_state = state\n", "\n", " if 3 <= gsize_state <= 5:\n", " if gsize_state == 3:\n", " if 12 <= bratio_state <= 14 and 120 <= btime_state <= 180 and 86 <= temperature_state <= 88:\n", " return 1\n", " if bratio_state < 12:\n", " 
return 2\n", " if bratio_state > 14:\n", " return 0\n", " if btime_state < 120:\n", " return 2\n", " if btime_state > 180:\n", " return 0\n", " if temperature_state < 86:\n", " return 2\n", " if temperature_state > 88:\n", " return 0\n", " elif gsize_state == 4:\n", " if 14 <= bratio_state <= 16 and 150 <= btime_state <= 180 and 88 <= temperature_state <= 90:\n", " return 1\n", " if bratio_state < 14:\n", " return 2\n", " if bratio_state > 16:\n", " return 0\n", " if btime_state < 150:\n", " return 2\n", " if btime_state > 180:\n", " return 0\n", " if temperature_state < 88:\n", " return 2\n", " if temperature_state > 90:\n", " return 0\n", " elif gsize_state == 5:\n", " if 16 <= bratio_state <= 18 and 210 <= btime_state <= 240 and 90 <= temperature_state <= 92:\n", " return 1\n", " if bratio_state < 16:\n", " return 2\n", " if bratio_state > 18:\n", " return 0\n", " if btime_state < 210:\n", " return 2\n", " if btime_state > 240:\n", " return 0\n", " if temperature_state < 90:\n", " return 2\n", " if temperature_state > 92:\n", " return 0\n", " elif gsize_state > 5:\n", " return 0\n", " elif gsize_state < 3:\n", " return 2\n", "\n", " return 1 # Default action\n", "\n", " def reset(self):\n", " #self.gsize_state = 1 + random.randint(0, 6)\n", " self.gsize_state = 4 + random.randint(-1,1)\n", " #self.bratio_state = 10 + random.randint(0, 10) # 14 to 20\n", " self.bratio_state = 15 + random.randint(-1,1)\n", " # brew time based on levelroast and grindsize\n", " #self.btime_state = 120 + random.randint(0, 36) * 5\n", " self.btime_state = 120 + random.randint(-5, 5)\n", " # temperature based on levelroast and grindsize\n", " #self.temperature_state = 84 + random.randint(0, 10) # Default range for levelroast 2\n", " self.temperature_state = 89 + random.randint(-1,1)\n", "\n", " self.state = np.array([\n", " self.gsize_state,\n", " self.bratio_state,\n", " self.btime_state,\n", " self.temperature_state\n", " ]).astype(float)\n", "\n", " self.gsize_length = self.bratio_length = self.btime_length = self.temperature_length = 60\n", " return self.state\n", "\n", "\n", " def render(self):\n", " pass\n", "\n" ] }, { "cell_type": "code", "execution_count": 855, "metadata": { "id": "_QTBXqxkHgrE" }, "outputs": [], "source": [ "# Instantiate the environment\n", "env = PouroverEnv()\n", "buffer = ReplayBuffer(buffer_size=2000)" ] }, { "cell_type": "code", "execution_count": 856, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "plHY9owqyReO", "outputId": "4cf5f79f-dd82-4c29-8ea3-90c9188020ce" }, "outputs": [ { "data": { "text/plain": [ "array([ 1.6069459, 18.634115 , 156.41008 , 84.259125 ], dtype=float32)" ] }, "execution_count": 856, "metadata": {}, "output_type": "execute_result" } ], "source": [ "env.observation_space.sample()" ] }, { "cell_type": "code", "execution_count": 857, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "pURSX3GjyReO", "outputId": "eeb77215-e0fa-4617-eb9d-d6826051c7de" }, "outputs": [ { "data": { "text/plain": [ "array([ 4., 14., 120., 88.])" ] }, "execution_count": 857, "metadata": {}, "output_type": "execute_result" } ], "source": [ "env.reset()" ] }, { "cell_type": "code", "execution_count": 758, "metadata": { "id": "XcaePsQOyReO" }, "outputs": [], "source": [ "from stable_baselines3.common.env_checker import check_env" ] }, { "cell_type": "markdown", "metadata": { "id": "ez3TXaoOyReP" }, "source": [ "# 5. 
Train Model" ] }, { "cell_type": "code", "execution_count": 357, "metadata": { "id": "0XD6XKVMORD_" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Using cpu device\n", "Wrapping the env with a `Monitor` wrapper\n", "Wrapping the env in a DummyVecEnv.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/opt/homebrew/lib/python3.11/site-packages/stable_baselines3/common/vec_env/patch_gym.py:49: UserWarning: You provided an OpenAI Gym environment. We strongly recommend transitioning to Gymnasium environments. Stable-Baselines3 is automatically wrapping your environments in a compatibility layer, which could potentially cause issues.\n", " warnings.warn(\n" ] } ], "source": [ "log_path = os.path.join('Training', 'Logs')\n", "model = PPO(\"MlpPolicy\", env, verbose=1, tensorboard_log=log_path)" ] }, { "cell_type": "code", "execution_count": 1024, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "fWCf_R_ndPki", "outputId": "e24d17fb-dbbd-4ab0-f4e5-a8ba50ca787e" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Using cpu device\n", "Wrapping the env with a `Monitor` wrapper\n", "Wrapping the env in a DummyVecEnv.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/opt/homebrew/lib/python3.11/site-packages/stable_baselines3/common/vec_env/patch_gym.py:49: UserWarning: You provided an OpenAI Gym environment. We strongly recommend transitioning to Gymnasium environments. Stable-Baselines3 is automatically wrapping your environments in a compatibility layer, which could potentially cause issues.\n", " warnings.warn(\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "---------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 125 |\n", "| time/ | |\n", "| fps | 4360 |\n", "| iterations | 1 |\n", "| time_elapsed | 0 |\n", "| total_timesteps | 2048 |\n", "---------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 125 |\n", "| time/ | |\n", "| fps | 2894 |\n", "| iterations | 2 |\n", "| time_elapsed | 1 |\n", "| total_timesteps | 4096 |\n", "| train/ | |\n", "| approx_kl | 0.011200322 |\n", "| clip_fraction | 0.0743 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1.09 |\n", "| explained_variance | 0.00121 |\n", "| learning_rate | 0.0003 |\n", "| loss | 106 |\n", "| n_updates | 10 |\n", "| policy_gradient_loss | -0.00548 |\n", "| value_loss | 395 |\n", "-----------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 124 |\n", "| time/ | |\n", "| fps | 2637 |\n", "| iterations | 3 |\n", "| time_elapsed | 2 |\n", "| total_timesteps | 6144 |\n", "| train/ | |\n", "| approx_kl | 0.009666683 |\n", "| clip_fraction | 0.0492 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1.07 |\n", "| explained_variance | 0.000656 |\n", "| learning_rate | 0.0003 |\n", "| loss | 182 |\n", "| n_updates | 20 |\n", "| policy_gradient_loss | -0.00506 |\n", "| value_loss | 458 |\n", "-----------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 124 |\n", "| time/ | |\n", "| fps | 2524 |\n", "| iterations | 4 |\n", "| time_elapsed | 3 |\n", "| total_timesteps | 8192 |\n", "| train/ | |\n", "| approx_kl | 0.0076989518 |\n", "| clip_fraction | 0.0795 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1.04 |\n", "| 
explained_variance | 0.000192 |\n", "| learning_rate | 0.0003 |\n", "| loss | 139 |\n", "| n_updates | 30 |\n", "| policy_gradient_loss | -0.0055 |\n", "| value_loss | 386 |\n", "------------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 126 |\n", "| time/ | |\n", "| fps | 2472 |\n", "| iterations | 5 |\n", "| time_elapsed | 4 |\n", "| total_timesteps | 10240 |\n", "| train/ | |\n", "| approx_kl | 0.002638106 |\n", "| clip_fraction | 0.0307 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1.03 |\n", "| explained_variance | 0.00236 |\n", "| learning_rate | 0.0003 |\n", "| loss | 144 |\n", "| n_updates | 40 |\n", "| policy_gradient_loss | -0.00148 |\n", "| value_loss | 375 |\n", "-----------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 129 |\n", "| time/ | |\n", "| fps | 2436 |\n", "| iterations | 6 |\n", "| time_elapsed | 5 |\n", "| total_timesteps | 12288 |\n", "| train/ | |\n", "| approx_kl | 0.0064966576 |\n", "| clip_fraction | 0.0154 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1.02 |\n", "| explained_variance | 0.0281 |\n", "| learning_rate | 0.0003 |\n", "| loss | 170 |\n", "| n_updates | 50 |\n", "| policy_gradient_loss | -0.00172 |\n", "| value_loss | 439 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 132 |\n", "| time/ | |\n", "| fps | 2413 |\n", "| iterations | 7 |\n", "| time_elapsed | 5 |\n", "| total_timesteps | 14336 |\n", "| train/ | |\n", "| approx_kl | 0.0013821615 |\n", "| clip_fraction | 0.0115 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1 |\n", "| explained_variance | 0.127 |\n", "| learning_rate | 0.0003 |\n", "| loss | 216 |\n", "| n_updates | 60 |\n", "| policy_gradient_loss | 7.52e-05 |\n", "| value_loss | 398 |\n", "------------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 136 |\n", "| time/ | |\n", "| fps | 2397 |\n", "| iterations | 8 |\n", "| time_elapsed | 6 |\n", "| total_timesteps | 16384 |\n", "| train/ | |\n", "| approx_kl | 0.018821292 |\n", "| clip_fraction | 0.118 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.951 |\n", "| explained_variance | 0.165 |\n", "| learning_rate | 0.0003 |\n", "| loss | 189 |\n", "| n_updates | 70 |\n", "| policy_gradient_loss | -0.00817 |\n", "| value_loss | 427 |\n", "-----------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 136 |\n", "| time/ | |\n", "| fps | 2384 |\n", "| iterations | 9 |\n", "| time_elapsed | 7 |\n", "| total_timesteps | 18432 |\n", "| train/ | |\n", "| approx_kl | 0.008349581 |\n", "| clip_fraction | 0.016 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.967 |\n", "| explained_variance | 0.225 |\n", "| learning_rate | 0.0003 |\n", "| loss | 235 |\n", "| n_updates | 80 |\n", "| policy_gradient_loss | -0.00132 |\n", "| value_loss | 480 |\n", "-----------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 136 |\n", "| time/ | |\n", "| fps | 2373 |\n", "| iterations | 10 |\n", "| time_elapsed | 8 |\n", "| total_timesteps | 20480 |\n", "| train/ | |\n", "| approx_kl | 0.0058731446 |\n", "| 
clip_fraction | 0.0138 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1 |\n", "| explained_variance | 0.367 |\n", "| learning_rate | 0.0003 |\n", "| loss | 176 |\n", "| n_updates | 90 |\n", "| policy_gradient_loss | -0.000959 |\n", "| value_loss | 365 |\n", "------------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 134 |\n", "| time/ | |\n", "| fps | 2364 |\n", "| iterations | 11 |\n", "| time_elapsed | 9 |\n", "| total_timesteps | 22528 |\n", "| train/ | |\n", "| approx_kl | 0.008399885 |\n", "| clip_fraction | 0.0175 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1.02 |\n", "| explained_variance | 0.207 |\n", "| learning_rate | 0.0003 |\n", "| loss | 263 |\n", "| n_updates | 100 |\n", "| policy_gradient_loss | -0.000458 |\n", "| value_loss | 455 |\n", "-----------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 135 |\n", "| time/ | |\n", "| fps | 2357 |\n", "| iterations | 12 |\n", "| time_elapsed | 10 |\n", "| total_timesteps | 24576 |\n", "| train/ | |\n", "| approx_kl | 0.0047428766 |\n", "| clip_fraction | 0.0298 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1.02 |\n", "| explained_variance | 0.336 |\n", "| learning_rate | 0.0003 |\n", "| loss | 176 |\n", "| n_updates | 110 |\n", "| policy_gradient_loss | -0.00408 |\n", "| value_loss | 396 |\n", "------------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 133 |\n", "| time/ | |\n", "| fps | 2351 |\n", "| iterations | 13 |\n", "| time_elapsed | 11 |\n", "| total_timesteps | 26624 |\n", "| train/ | |\n", "| approx_kl | 0.004083778 |\n", "| clip_fraction | 0.0277 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1.03 |\n", "| explained_variance | 0.467 |\n", "| learning_rate | 0.0003 |\n", "| loss | 178 |\n", "| n_updates | 120 |\n", "| policy_gradient_loss | -0.00207 |\n", "| value_loss | 349 |\n", "-----------------------------------------\n", "----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 135 |\n", "| time/ | |\n", "| fps | 2345 |\n", "| iterations | 14 |\n", "| time_elapsed | 12 |\n", "| total_timesteps | 28672 |\n", "| train/ | |\n", "| approx_kl | 0.00661338 |\n", "| clip_fraction | 0.0253 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1.02 |\n", "| explained_variance | 0.413 |\n", "| learning_rate | 0.0003 |\n", "| loss | 244 |\n", "| n_updates | 130 |\n", "| policy_gradient_loss | -0.00248 |\n", "| value_loss | 391 |\n", "----------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 137 |\n", "| time/ | |\n", "| fps | 2340 |\n", "| iterations | 15 |\n", "| time_elapsed | 13 |\n", "| total_timesteps | 30720 |\n", "| train/ | |\n", "| approx_kl | 0.0058042724 |\n", "| clip_fraction | 0.0533 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -1 |\n", "| explained_variance | 0.176 |\n", "| learning_rate | 0.0003 |\n", "| loss | 227 |\n", "| n_updates | 140 |\n", "| policy_gradient_loss | -0.00123 |\n", "| value_loss | 484 |\n", "------------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 136 |\n", "| time/ | |\n", "| fps | 2336 |\n", "| iterations | 16 |\n", "| time_elapsed | 14 
|\n", "| total_timesteps | 32768 |\n", "| train/ | |\n", "| approx_kl | 0.004443947 |\n", "| clip_fraction | 0.0309 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.997 |\n", "| explained_variance | 0.421 |\n", "| learning_rate | 0.0003 |\n", "| loss | 222 |\n", "| n_updates | 150 |\n", "| policy_gradient_loss | -0.00131 |\n", "| value_loss | 396 |\n", "-----------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 134 |\n", "| time/ | |\n", "| fps | 2333 |\n", "| iterations | 17 |\n", "| time_elapsed | 14 |\n", "| total_timesteps | 34816 |\n", "| train/ | |\n", "| approx_kl | 0.013686146 |\n", "| clip_fraction | 0.0274 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.97 |\n", "| explained_variance | 0.332 |\n", "| learning_rate | 0.0003 |\n", "| loss | 188 |\n", "| n_updates | 160 |\n", "| policy_gradient_loss | -0.000571 |\n", "| value_loss | 413 |\n", "-----------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 136 |\n", "| time/ | |\n", "| fps | 2330 |\n", "| iterations | 18 |\n", "| time_elapsed | 15 |\n", "| total_timesteps | 36864 |\n", "| train/ | |\n", "| approx_kl | 0.003698717 |\n", "| clip_fraction | 0.0132 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.917 |\n", "| explained_variance | 0.178 |\n", "| learning_rate | 0.0003 |\n", "| loss | 224 |\n", "| n_updates | 170 |\n", "| policy_gradient_loss | -0.00131 |\n", "| value_loss | 481 |\n", "-----------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 138 |\n", "| time/ | |\n", "| fps | 2328 |\n", "| iterations | 19 |\n", "| time_elapsed | 16 |\n", "| total_timesteps | 38912 |\n", "| train/ | |\n", "| approx_kl | 0.002963175 |\n", "| clip_fraction | 0.0107 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.913 |\n", "| explained_variance | 0.373 |\n", "| learning_rate | 0.0003 |\n", "| loss | 213 |\n", "| n_updates | 180 |\n", "| policy_gradient_loss | -0.0015 |\n", "| value_loss | 464 |\n", "-----------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 143 |\n", "| time/ | |\n", "| fps | 2326 |\n", "| iterations | 20 |\n", "| time_elapsed | 17 |\n", "| total_timesteps | 40960 |\n", "| train/ | |\n", "| approx_kl | 0.010358693 |\n", "| clip_fraction | 0.0481 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.87 |\n", "| explained_variance | 0.333 |\n", "| learning_rate | 0.0003 |\n", "| loss | 261 |\n", "| n_updates | 190 |\n", "| policy_gradient_loss | -0.00263 |\n", "| value_loss | 498 |\n", "-----------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 143 |\n", "| time/ | |\n", "| fps | 2325 |\n", "| iterations | 21 |\n", "| time_elapsed | 18 |\n", "| total_timesteps | 43008 |\n", "| train/ | |\n", "| approx_kl | 0.004888855 |\n", "| clip_fraction | 0.0285 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.823 |\n", "| explained_variance | 0.24 |\n", "| learning_rate | 0.0003 |\n", "| loss | 256 |\n", "| n_updates | 200 |\n", "| policy_gradient_loss | -0.00143 |\n", "| value_loss | 553 |\n", "-----------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| 
ep_rew_mean | 144 |\n", "| time/ | |\n", "| fps | 2324 |\n", "| iterations | 22 |\n", "| time_elapsed | 19 |\n", "| total_timesteps | 45056 |\n", "| train/ | |\n", "| approx_kl | 0.005170634 |\n", "| clip_fraction | 0.0638 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.812 |\n", "| explained_variance | 0.174 |\n", "| learning_rate | 0.0003 |\n", "| loss | 355 |\n", "| n_updates | 210 |\n", "| policy_gradient_loss | -0.00546 |\n", "| value_loss | 604 |\n", "-----------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 146 |\n", "| time/ | |\n", "| fps | 2322 |\n", "| iterations | 23 |\n", "| time_elapsed | 20 |\n", "| total_timesteps | 47104 |\n", "| train/ | |\n", "| approx_kl | 0.0044944417 |\n", "| clip_fraction | 0.0259 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.755 |\n", "| explained_variance | 0.373 |\n", "| learning_rate | 0.0003 |\n", "| loss | 253 |\n", "| n_updates | 220 |\n", "| policy_gradient_loss | -0.00166 |\n", "| value_loss | 502 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 152 |\n", "| time/ | |\n", "| fps | 2321 |\n", "| iterations | 24 |\n", "| time_elapsed | 21 |\n", "| total_timesteps | 49152 |\n", "| train/ | |\n", "| approx_kl | 0.0029782578 |\n", "| clip_fraction | 0.0332 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.704 |\n", "| explained_variance | 0.229 |\n", "| learning_rate | 0.0003 |\n", "| loss | 322 |\n", "| n_updates | 230 |\n", "| policy_gradient_loss | -0.00261 |\n", "| value_loss | 627 |\n", "------------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 158 |\n", "| time/ | |\n", "| fps | 2320 |\n", "| iterations | 25 |\n", "| time_elapsed | 22 |\n", "| total_timesteps | 51200 |\n", "| train/ | |\n", "| approx_kl | 0.002413044 |\n", "| clip_fraction | 0.0171 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.604 |\n", "| explained_variance | 0.405 |\n", "| learning_rate | 0.0003 |\n", "| loss | 273 |\n", "| n_updates | 240 |\n", "| policy_gradient_loss | -0.00183 |\n", "| value_loss | 571 |\n", "-----------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 162 |\n", "| time/ | |\n", "| fps | 2319 |\n", "| iterations | 26 |\n", "| time_elapsed | 22 |\n", "| total_timesteps | 53248 |\n", "| train/ | |\n", "| approx_kl | 0.0021436154 |\n", "| clip_fraction | 0.0289 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.54 |\n", "| explained_variance | 0.28 |\n", "| learning_rate | 0.0003 |\n", "| loss | 342 |\n", "| n_updates | 250 |\n", "| policy_gradient_loss | -0.00178 |\n", "| value_loss | 662 |\n", "------------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 161 |\n", "| time/ | |\n", "| fps | 2317 |\n", "| iterations | 27 |\n", "| time_elapsed | 23 |\n", "| total_timesteps | 55296 |\n", "| train/ | |\n", "| approx_kl | 0.001944111 |\n", "| clip_fraction | 0.0477 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.521 |\n", "| explained_variance | 0.32 |\n", "| learning_rate | 0.0003 |\n", "| loss | 240 |\n", "| n_updates | 260 |\n", "| policy_gradient_loss | -0.00308 |\n", "| value_loss | 609 |\n", 
"-----------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 163 |\n", "| time/ | |\n", "| fps | 2316 |\n", "| iterations | 28 |\n", "| time_elapsed | 24 |\n", "| total_timesteps | 57344 |\n", "| train/ | |\n", "| approx_kl | 0.0012462813 |\n", "| clip_fraction | 0.013 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.488 |\n", "| explained_variance | 0.463 |\n", "| learning_rate | 0.0003 |\n", "| loss | 251 |\n", "| n_updates | 270 |\n", "| policy_gradient_loss | -0.000192 |\n", "| value_loss | 524 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 163 |\n", "| time/ | |\n", "| fps | 2316 |\n", "| iterations | 29 |\n", "| time_elapsed | 25 |\n", "| total_timesteps | 59392 |\n", "| train/ | |\n", "| approx_kl | 0.0017906033 |\n", "| clip_fraction | 0.0103 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.467 |\n", "| explained_variance | 0.386 |\n", "| learning_rate | 0.0003 |\n", "| loss | 356 |\n", "| n_updates | 280 |\n", "| policy_gradient_loss | -0.000762 |\n", "| value_loss | 658 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 168 |\n", "| time/ | |\n", "| fps | 2314 |\n", "| iterations | 30 |\n", "| time_elapsed | 26 |\n", "| total_timesteps | 61440 |\n", "| train/ | |\n", "| approx_kl | 0.0015548973 |\n", "| clip_fraction | 0.0115 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.395 |\n", "| explained_variance | 0.191 |\n", "| learning_rate | 0.0003 |\n", "| loss | 380 |\n", "| n_updates | 290 |\n", "| policy_gradient_loss | -0.000679 |\n", "| value_loss | 773 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 169 |\n", "| time/ | |\n", "| fps | 2313 |\n", "| iterations | 31 |\n", "| time_elapsed | 27 |\n", "| total_timesteps | 63488 |\n", "| train/ | |\n", "| approx_kl | 0.0024950395 |\n", "| clip_fraction | 0.0488 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.396 |\n", "| explained_variance | 0.262 |\n", "| learning_rate | 0.0003 |\n", "| loss | 327 |\n", "| n_updates | 300 |\n", "| policy_gradient_loss | -0.00372 |\n", "| value_loss | 763 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2312 |\n", "| iterations | 32 |\n", "| time_elapsed | 28 |\n", "| total_timesteps | 65536 |\n", "| train/ | |\n", "| approx_kl | 0.0011778169 |\n", "| clip_fraction | 0.00537 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.351 |\n", "| explained_variance | 0.364 |\n", "| learning_rate | 0.0003 |\n", "| loss | 364 |\n", "| n_updates | 310 |\n", "| policy_gradient_loss | -0.000972 |\n", "| value_loss | 714 |\n", "------------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 175 |\n", "| time/ | |\n", "| fps | 2311 |\n", "| iterations | 33 |\n", "| time_elapsed | 29 |\n", "| total_timesteps | 67584 |\n", "| train/ | |\n", "| approx_kl | 0.001553541 |\n", "| clip_fraction | 0.00767 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.333 |\n", "| explained_variance | 0.316 |\n", "| learning_rate | 
0.0003 |\n", "| loss | 350 |\n", "| n_updates | 320 |\n", "| policy_gradient_loss | -0.000265 |\n", "| value_loss | 788 |\n", "-----------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 179 |\n", "| time/ | |\n", "| fps | 2310 |\n", "| iterations | 34 |\n", "| time_elapsed | 30 |\n", "| total_timesteps | 69632 |\n", "| train/ | |\n", "| approx_kl | 0.0021591429 |\n", "| clip_fraction | 0.0343 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.331 |\n", "| explained_variance | 0.368 |\n", "| learning_rate | 0.0003 |\n", "| loss | 292 |\n", "| n_updates | 330 |\n", "| policy_gradient_loss | -0.00157 |\n", "| value_loss | 752 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 178 |\n", "| time/ | |\n", "| fps | 2310 |\n", "| iterations | 35 |\n", "| time_elapsed | 31 |\n", "| total_timesteps | 71680 |\n", "| train/ | |\n", "| approx_kl | 0.0012061024 |\n", "| clip_fraction | 0.0163 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.311 |\n", "| explained_variance | 0.315 |\n", "| learning_rate | 0.0003 |\n", "| loss | 366 |\n", "| n_updates | 340 |\n", "| policy_gradient_loss | -0.00122 |\n", "| value_loss | 794 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 176 |\n", "| time/ | |\n", "| fps | 2309 |\n", "| iterations | 36 |\n", "| time_elapsed | 31 |\n", "| total_timesteps | 73728 |\n", "| train/ | |\n", "| approx_kl | 0.0011449154 |\n", "| clip_fraction | 0.0203 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.268 |\n", "| explained_variance | 0.255 |\n", "| learning_rate | 0.0003 |\n", "| loss | 385 |\n", "| n_updates | 350 |\n", "| policy_gradient_loss | -0.00149 |\n", "| value_loss | 788 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 174 |\n", "| time/ | |\n", "| fps | 2308 |\n", "| iterations | 37 |\n", "| time_elapsed | 32 |\n", "| total_timesteps | 75776 |\n", "| train/ | |\n", "| approx_kl | 0.0006829554 |\n", "| clip_fraction | 0.0137 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.258 |\n", "| explained_variance | 0.22 |\n", "| learning_rate | 0.0003 |\n", "| loss | 599 |\n", "| n_updates | 360 |\n", "| policy_gradient_loss | -0.00145 |\n", "| value_loss | 838 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2307 |\n", "| iterations | 38 |\n", "| time_elapsed | 33 |\n", "| total_timesteps | 77824 |\n", "| train/ | |\n", "| approx_kl | 0.0008449111 |\n", "| clip_fraction | 0.011 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.235 |\n", "| explained_variance | 0.221 |\n", "| learning_rate | 0.0003 |\n", "| loss | 408 |\n", "| n_updates | 370 |\n", "| policy_gradient_loss | -0.000338 |\n", "| value_loss | 888 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 174 |\n", "| time/ | |\n", "| fps | 2306 |\n", "| iterations | 39 |\n", "| time_elapsed | 34 |\n", "| total_timesteps | 79872 |\n", "| train/ | |\n", "| approx_kl | 0.0001816751 |\n", "| clip_fraction | 0.002 
|\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.254 |\n", "| explained_variance | 0.147 |\n", "| learning_rate | 0.0003 |\n", "| loss | 356 |\n", "| n_updates | 380 |\n", "| policy_gradient_loss | -2.76e-05 |\n", "| value_loss | 930 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 170 |\n", "| time/ | |\n", "| fps | 2246 |\n", "| iterations | 40 |\n", "| time_elapsed | 36 |\n", "| total_timesteps | 81920 |\n", "| train/ | |\n", "| approx_kl | 0.00030809594 |\n", "| clip_fraction | 0.0123 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.231 |\n", "| explained_variance | 0.3 |\n", "| learning_rate | 0.0003 |\n", "| loss | 438 |\n", "| n_updates | 390 |\n", "| policy_gradient_loss | -0.00111 |\n", "| value_loss | 804 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 170 |\n", "| time/ | |\n", "| fps | 2246 |\n", "| iterations | 41 |\n", "| time_elapsed | 37 |\n", "| total_timesteps | 83968 |\n", "| train/ | |\n", "| approx_kl | 0.00031096602 |\n", "| clip_fraction | 0.0112 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.239 |\n", "| explained_variance | 0.263 |\n", "| learning_rate | 0.0003 |\n", "| loss | 348 |\n", "| n_updates | 400 |\n", "| policy_gradient_loss | -0.000966 |\n", "| value_loss | 886 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 168 |\n", "| time/ | |\n", "| fps | 2247 |\n", "| iterations | 42 |\n", "| time_elapsed | 38 |\n", "| total_timesteps | 86016 |\n", "| train/ | |\n", "| approx_kl | 0.0016176439 |\n", "| clip_fraction | 0.0151 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.228 |\n", "| explained_variance | 0.323 |\n", "| learning_rate | 0.0003 |\n", "| loss | 547 |\n", "| n_updates | 410 |\n", "| policy_gradient_loss | -0.000936 |\n", "| value_loss | 803 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 174 |\n", "| time/ | |\n", "| fps | 2248 |\n", "| iterations | 43 |\n", "| time_elapsed | 39 |\n", "| total_timesteps | 88064 |\n", "| train/ | |\n", "| approx_kl | 0.00015557147 |\n", "| clip_fraction | 0.00645 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.22 |\n", "| explained_variance | 0.135 |\n", "| learning_rate | 0.0003 |\n", "| loss | 447 |\n", "| n_updates | 420 |\n", "| policy_gradient_loss | -5.43e-05 |\n", "| value_loss | 903 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2250 |\n", "| iterations | 44 |\n", "| time_elapsed | 40 |\n", "| total_timesteps | 90112 |\n", "| train/ | |\n", "| approx_kl | 0.0006329224 |\n", "| clip_fraction | 0.00718 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.204 |\n", "| explained_variance | 0.119 |\n", "| learning_rate | 0.0003 |\n", "| loss | 558 |\n", "| n_updates | 430 |\n", "| policy_gradient_loss | -7.04e-05 |\n", "| value_loss | 944 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2251 |\n", "| iterations | 45 |\n", "| 
time_elapsed | 40 |\n", "| total_timesteps | 92160 |\n", "| train/ | |\n", "| approx_kl | 0.0005965793 |\n", "| clip_fraction | 0.00596 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.206 |\n", "| explained_variance | 0.193 |\n", "| learning_rate | 0.0003 |\n", "| loss | 458 |\n", "| n_updates | 440 |\n", "| policy_gradient_loss | -0.000268 |\n", "| value_loss | 934 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 165 |\n", "| time/ | |\n", "| fps | 2252 |\n", "| iterations | 46 |\n", "| time_elapsed | 41 |\n", "| total_timesteps | 94208 |\n", "| train/ | |\n", "| approx_kl | 0.0007255195 |\n", "| clip_fraction | 0.0152 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.181 |\n", "| explained_variance | 0.186 |\n", "| learning_rate | 0.0003 |\n", "| loss | 524 |\n", "| n_updates | 450 |\n", "| policy_gradient_loss | -0.00107 |\n", "| value_loss | 899 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 170 |\n", "| time/ | |\n", "| fps | 2253 |\n", "| iterations | 47 |\n", "| time_elapsed | 42 |\n", "| total_timesteps | 96256 |\n", "| train/ | |\n", "| approx_kl | 0.0006645897 |\n", "| clip_fraction | 0.00654 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.19 |\n", "| explained_variance | 0.155 |\n", "| learning_rate | 0.0003 |\n", "| loss | 376 |\n", "| n_updates | 460 |\n", "| policy_gradient_loss | -0.000365 |\n", "| value_loss | 943 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 171 |\n", "| time/ | |\n", "| fps | 2254 |\n", "| iterations | 48 |\n", "| time_elapsed | 43 |\n", "| total_timesteps | 98304 |\n", "| train/ | |\n", "| approx_kl | 0.00016179733 |\n", "| clip_fraction | 0.0041 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.188 |\n", "| explained_variance | 0.161 |\n", "| learning_rate | 0.0003 |\n", "| loss | 503 |\n", "| n_updates | 470 |\n", "| policy_gradient_loss | -0.00011 |\n", "| value_loss | 916 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 169 |\n", "| time/ | |\n", "| fps | 2255 |\n", "| iterations | 49 |\n", "| time_elapsed | 44 |\n", "| total_timesteps | 100352 |\n", "| train/ | |\n", "| approx_kl | 0.0004814395 |\n", "| clip_fraction | 0.00327 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.189 |\n", "| explained_variance | 0.118 |\n", "| learning_rate | 0.0003 |\n", "| loss | 353 |\n", "| n_updates | 480 |\n", "| policy_gradient_loss | 6.37e-05 |\n", "| value_loss | 957 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 167 |\n", "| time/ | |\n", "| fps | 2255 |\n", "| iterations | 50 |\n", "| time_elapsed | 45 |\n", "| total_timesteps | 102400 |\n", "| train/ | |\n", "| approx_kl | 0.00044479658 |\n", "| clip_fraction | 0.00469 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.162 |\n", "| explained_variance | 0.0203 |\n", "| learning_rate | 0.0003 |\n", "| loss | 435 |\n", "| n_updates | 490 |\n", "| policy_gradient_loss | -0.000469 |\n", "| value_loss | 1.05e+03 |\n", "-------------------------------------------\n", "-------------------------------------------\n", 
"| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 170 |\n", "| time/ | |\n", "| fps | 2256 |\n", "| iterations | 51 |\n", "| time_elapsed | 46 |\n", "| total_timesteps | 104448 |\n", "| train/ | |\n", "| approx_kl | 0.00044121026 |\n", "| clip_fraction | 0.00332 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.163 |\n", "| explained_variance | 0.115 |\n", "| learning_rate | 0.0003 |\n", "| loss | 428 |\n", "| n_updates | 500 |\n", "| policy_gradient_loss | -0.000204 |\n", "| value_loss | 920 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 171 |\n", "| time/ | |\n", "| fps | 2257 |\n", "| iterations | 52 |\n", "| time_elapsed | 47 |\n", "| total_timesteps | 106496 |\n", "| train/ | |\n", "| approx_kl | 0.00064821605 |\n", "| clip_fraction | 0.00645 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.182 |\n", "| explained_variance | 0.186 |\n", "| learning_rate | 0.0003 |\n", "| loss | 390 |\n", "| n_updates | 510 |\n", "| policy_gradient_loss | -0.000226 |\n", "| value_loss | 961 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 170 |\n", "| time/ | |\n", "| fps | 2257 |\n", "| iterations | 53 |\n", "| time_elapsed | 48 |\n", "| total_timesteps | 108544 |\n", "| train/ | |\n", "| approx_kl | 0.0012013838 |\n", "| clip_fraction | 0.00737 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.183 |\n", "| explained_variance | 0.169 |\n", "| learning_rate | 0.0003 |\n", "| loss | 594 |\n", "| n_updates | 520 |\n", "| policy_gradient_loss | -0.000278 |\n", "| value_loss | 955 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 168 |\n", "| time/ | |\n", "| fps | 2258 |\n", "| iterations | 54 |\n", "| time_elapsed | 48 |\n", "| total_timesteps | 110592 |\n", "| train/ | |\n", "| approx_kl | 0.00077301863 |\n", "| clip_fraction | 0.00303 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.178 |\n", "| explained_variance | 0.0916 |\n", "| learning_rate | 0.0003 |\n", "| loss | 782 |\n", "| n_updates | 530 |\n", "| policy_gradient_loss | 8.33e-05 |\n", "| value_loss | 1e+03 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 168 |\n", "| time/ | |\n", "| fps | 2258 |\n", "| iterations | 55 |\n", "| time_elapsed | 49 |\n", "| total_timesteps | 112640 |\n", "| train/ | |\n", "| approx_kl | 0.0009950304 |\n", "| clip_fraction | 0.00928 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.194 |\n", "| explained_variance | 0.0875 |\n", "| learning_rate | 0.0003 |\n", "| loss | 445 |\n", "| n_updates | 540 |\n", "| policy_gradient_loss | -0.000889 |\n", "| value_loss | 969 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 167 |\n", "| time/ | |\n", "| fps | 2259 |\n", "| iterations | 56 |\n", "| time_elapsed | 50 |\n", "| total_timesteps | 114688 |\n", "| train/ | |\n", "| approx_kl | 0.0023866436 |\n", "| clip_fraction | 0.014 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.158 |\n", "| explained_variance | 0.109 |\n", "| learning_rate | 0.0003 |\n", "| loss | 304 |\n", "| n_updates | 550 |\n", "| 
policy_gradient_loss | -0.000242 |\n", "| value_loss | 969 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 166 |\n", "| time/ | |\n", "| fps | 2259 |\n", "| iterations | 57 |\n", "| time_elapsed | 51 |\n", "| total_timesteps | 116736 |\n", "| train/ | |\n", "| approx_kl | 0.0008009806 |\n", "| clip_fraction | 0.00605 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.154 |\n", "| explained_variance | -0.00684 |\n", "| learning_rate | 0.0003 |\n", "| loss | 623 |\n", "| n_updates | 560 |\n", "| policy_gradient_loss | -0.000992 |\n", "| value_loss | 1.15e+03 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 162 |\n", "| time/ | |\n", "| fps | 2260 |\n", "| iterations | 58 |\n", "| time_elapsed | 52 |\n", "| total_timesteps | 118784 |\n", "| train/ | |\n", "| approx_kl | 0.0009766024 |\n", "| clip_fraction | 0.00854 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.135 |\n", "| explained_variance | 0.116 |\n", "| learning_rate | 0.0003 |\n", "| loss | 495 |\n", "| n_updates | 570 |\n", "| policy_gradient_loss | 0.000732 |\n", "| value_loss | 965 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 160 |\n", "| time/ | |\n", "| fps | 2260 |\n", "| iterations | 59 |\n", "| time_elapsed | 53 |\n", "| total_timesteps | 120832 |\n", "| train/ | |\n", "| approx_kl | 0.00029140065 |\n", "| clip_fraction | 0.00151 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.124 |\n", "| explained_variance | 0.0377 |\n", "| learning_rate | 0.0003 |\n", "| loss | 549 |\n", "| n_updates | 580 |\n", "| policy_gradient_loss | -0.000495 |\n", "| value_loss | 944 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 160 |\n", "| time/ | |\n", "| fps | 2260 |\n", "| iterations | 60 |\n", "| time_elapsed | 54 |\n", "| total_timesteps | 122880 |\n", "| train/ | |\n", "| approx_kl | 0.0003684046 |\n", "| clip_fraction | 0.00347 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.129 |\n", "| explained_variance | 0.0442 |\n", "| learning_rate | 0.0003 |\n", "| loss | 488 |\n", "| n_updates | 590 |\n", "| policy_gradient_loss | 0.000179 |\n", "| value_loss | 937 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 168 |\n", "| time/ | |\n", "| fps | 2261 |\n", "| iterations | 61 |\n", "| time_elapsed | 55 |\n", "| total_timesteps | 124928 |\n", "| train/ | |\n", "| approx_kl | 0.00045891784 |\n", "| clip_fraction | 0.00635 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.13 |\n", "| explained_variance | 0.0348 |\n", "| learning_rate | 0.0003 |\n", "| loss | 592 |\n", "| n_updates | 600 |\n", "| policy_gradient_loss | -0.000529 |\n", "| value_loss | 969 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 171 |\n", "| time/ | |\n", "| fps | 2261 |\n", "| iterations | 62 |\n", "| time_elapsed | 56 |\n", "| total_timesteps | 126976 |\n", "| train/ | |\n", "| approx_kl | 0.00029554983 |\n", "| clip_fraction | 0.00522 |\n", "| clip_range | 0.2 
|\n", "| entropy_loss | -0.151 |\n", "| explained_variance | 0.0748 |\n", "| learning_rate | 0.0003 |\n", "| loss | 370 |\n", "| n_updates | 610 |\n", "| policy_gradient_loss | -0.000586 |\n", "| value_loss | 975 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2259 |\n", "| iterations | 63 |\n", "| time_elapsed | 57 |\n", "| total_timesteps | 129024 |\n", "| train/ | |\n", "| approx_kl | 0.00018169518 |\n", "| clip_fraction | 0.0042 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.148 |\n", "| explained_variance | 0.0646 |\n", "| learning_rate | 0.0003 |\n", "| loss | 478 |\n", "| n_updates | 620 |\n", "| policy_gradient_loss | 0.000226 |\n", "| value_loss | 1.05e+03 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 169 |\n", "| time/ | |\n", "| fps | 2259 |\n", "| iterations | 64 |\n", "| time_elapsed | 58 |\n", "| total_timesteps | 131072 |\n", "| train/ | |\n", "| approx_kl | 0.0005208097 |\n", "| clip_fraction | 0.00786 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.151 |\n", "| explained_variance | 0.141 |\n", "| learning_rate | 0.0003 |\n", "| loss | 386 |\n", "| n_updates | 630 |\n", "| policy_gradient_loss | -0.000395 |\n", "| value_loss | 942 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 169 |\n", "| time/ | |\n", "| fps | 2259 |\n", "| iterations | 65 |\n", "| time_elapsed | 58 |\n", "| total_timesteps | 133120 |\n", "| train/ | |\n", "| approx_kl | 0.0006012707 |\n", "| clip_fraction | 0.0167 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.215 |\n", "| explained_variance | 0.0752 |\n", "| learning_rate | 0.0003 |\n", "| loss | 461 |\n", "| n_updates | 640 |\n", "| policy_gradient_loss | 0.00108 |\n", "| value_loss | 992 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 171 |\n", "| time/ | |\n", "| fps | 2260 |\n", "| iterations | 66 |\n", "| time_elapsed | 59 |\n", "| total_timesteps | 135168 |\n", "| train/ | |\n", "| approx_kl | 8.7589695e-05 |\n", "| clip_fraction | 0.0042 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.188 |\n", "| explained_variance | 0.204 |\n", "| learning_rate | 0.0003 |\n", "| loss | 574 |\n", "| n_updates | 650 |\n", "| policy_gradient_loss | 0.000315 |\n", "| value_loss | 906 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 174 |\n", "| time/ | |\n", "| fps | 2260 |\n", "| iterations | 67 |\n", "| time_elapsed | 60 |\n", "| total_timesteps | 137216 |\n", "| train/ | |\n", "| approx_kl | 0.00078866933 |\n", "| clip_fraction | 0.0127 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.166 |\n", "| explained_variance | 0.26 |\n", "| learning_rate | 0.0003 |\n", "| loss | 425 |\n", "| n_updates | 660 |\n", "| policy_gradient_loss | -0.000906 |\n", "| value_loss | 877 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 171 |\n", "| time/ | |\n", "| fps | 2260 |\n", "| iterations | 68 |\n", "| time_elapsed | 61 
|\n", "| total_timesteps | 139264 |\n", "| train/ | |\n", "| approx_kl | 0.0002554046 |\n", "| clip_fraction | 0.0141 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.184 |\n", "| explained_variance | 0.198 |\n", "| learning_rate | 0.0003 |\n", "| loss | 390 |\n", "| n_updates | 670 |\n", "| policy_gradient_loss | 0.00231 |\n", "| value_loss | 916 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 170 |\n", "| time/ | |\n", "| fps | 2261 |\n", "| iterations | 69 |\n", "| time_elapsed | 62 |\n", "| total_timesteps | 141312 |\n", "| train/ | |\n", "| approx_kl | 0.0002507472 |\n", "| clip_fraction | 0.0041 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.161 |\n", "| explained_variance | 0.108 |\n", "| learning_rate | 0.0003 |\n", "| loss | 495 |\n", "| n_updates | 680 |\n", "| policy_gradient_loss | -0.000457 |\n", "| value_loss | 1.03e+03 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2260 |\n", "| iterations | 70 |\n", "| time_elapsed | 63 |\n", "| total_timesteps | 143360 |\n", "| train/ | |\n", "| approx_kl | 7.5960736e-05 |\n", "| clip_fraction | 0.00156 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.17 |\n", "| explained_variance | 0.144 |\n", "| learning_rate | 0.0003 |\n", "| loss | 466 |\n", "| n_updates | 690 |\n", "| policy_gradient_loss | 0.00015 |\n", "| value_loss | 953 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 177 |\n", "| time/ | |\n", "| fps | 2260 |\n", "| iterations | 71 |\n", "| time_elapsed | 64 |\n", "| total_timesteps | 145408 |\n", "| train/ | |\n", "| approx_kl | 0.00048263054 |\n", "| clip_fraction | 0.00151 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.172 |\n", "| explained_variance | 0.299 |\n", "| learning_rate | 0.0003 |\n", "| loss | 457 |\n", "| n_updates | 700 |\n", "| policy_gradient_loss | 3.72e-05 |\n", "| value_loss | 899 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 179 |\n", "| time/ | |\n", "| fps | 2260 |\n", "| iterations | 72 |\n", "| time_elapsed | 65 |\n", "| total_timesteps | 147456 |\n", "| train/ | |\n", "| approx_kl | 0.00055459974 |\n", "| clip_fraction | 0.00786 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.166 |\n", "| explained_variance | 0.323 |\n", "| learning_rate | 0.0003 |\n", "| loss | 630 |\n", "| n_updates | 710 |\n", "| policy_gradient_loss | -0.000848 |\n", "| value_loss | 886 |\n", "-------------------------------------------\n", "-----------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 173 |\n", "| time/ | |\n", "| fps | 2261 |\n", "| iterations | 73 |\n", "| time_elapsed | 66 |\n", "| total_timesteps | 149504 |\n", "| train/ | |\n", "| approx_kl | 0.000288731 |\n", "| clip_fraction | 0.00869 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.168 |\n", "| explained_variance | 0.0986 |\n", "| learning_rate | 0.0003 |\n", "| loss | 392 |\n", "| n_updates | 720 |\n", "| policy_gradient_loss | 0.00107 |\n", "| value_loss | 1.06e+03 |\n", "-----------------------------------------\n", "-------------------------------------------\n", "| rollout/ 
| |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 168 |\n", "| time/ | |\n", "| fps | 2261 |\n", "| iterations | 74 |\n", "| time_elapsed | 67 |\n", "| total_timesteps | 151552 |\n", "| train/ | |\n", "| approx_kl | 0.00028759264 |\n", "| clip_fraction | 0.00645 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.199 |\n", "| explained_variance | 0.178 |\n", "| learning_rate | 0.0003 |\n", "| loss | 549 |\n", "| n_updates | 730 |\n", "| policy_gradient_loss | -0.000524 |\n", "| value_loss | 973 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 170 |\n", "| time/ | |\n", "| fps | 2262 |\n", "| iterations | 75 |\n", "| time_elapsed | 67 |\n", "| total_timesteps | 153600 |\n", "| train/ | |\n", "| approx_kl | 0.0005823185 |\n", "| clip_fraction | 0.00435 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.21 |\n", "| explained_variance | 0.334 |\n", "| learning_rate | 0.0003 |\n", "| loss | 400 |\n", "| n_updates | 740 |\n", "| policy_gradient_loss | -0.000216 |\n", "| value_loss | 816 |\n", "------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 174 |\n", "| time/ | |\n", "| fps | 2263 |\n", "| iterations | 76 |\n", "| time_elapsed | 68 |\n", "| total_timesteps | 155648 |\n", "| train/ | |\n", "| approx_kl | 0.0011690343 |\n", "| clip_fraction | 0.0138 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.211 |\n", "| explained_variance | 0.265 |\n", "| learning_rate | 0.0003 |\n", "| loss | 360 |\n", "| n_updates | 750 |\n", "| policy_gradient_loss | -0.000666 |\n", "| value_loss | 856 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2263 |\n", "| iterations | 77 |\n", "| time_elapsed | 69 |\n", "| total_timesteps | 157696 |\n", "| train/ | |\n", "| approx_kl | 0.00011908921 |\n", "| clip_fraction | 0.00937 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.197 |\n", "| explained_variance | 0.25 |\n", "| learning_rate | 0.0003 |\n", "| loss | 442 |\n", "| n_updates | 760 |\n", "| policy_gradient_loss | -8.98e-05 |\n", "| value_loss | 904 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 173 |\n", "| time/ | |\n", "| fps | 2263 |\n", "| iterations | 78 |\n", "| time_elapsed | 70 |\n", "| total_timesteps | 159744 |\n", "| train/ | |\n", "| approx_kl | 4.769425e-05 |\n", "| clip_fraction | 0.0019 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.188 |\n", "| explained_variance | 0.153 |\n", "| learning_rate | 0.0003 |\n", "| loss | 430 |\n", "| n_updates | 770 |\n", "| policy_gradient_loss | 9.81e-05 |\n", "| value_loss | 1e+03 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 173 |\n", "| time/ | |\n", "| fps | 2264 |\n", "| iterations | 79 |\n", "| time_elapsed | 71 |\n", "| total_timesteps | 161792 |\n", "| train/ | |\n", "| approx_kl | 0.00068604236 |\n", "| clip_fraction | 0.013 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.161 |\n", "| explained_variance | 0.29 |\n", "| learning_rate | 0.0003 |\n", "| loss | 383 |\n", "| n_updates | 780 |\n", "| policy_gradient_loss | -0.000718 
|\n", "| value_loss | 844 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2264 |\n", "| iterations | 80 |\n", "| time_elapsed | 72 |\n", "| total_timesteps | 163840 |\n", "| train/ | |\n", "| approx_kl | 0.00028661787 |\n", "| clip_fraction | 0.00371 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.147 |\n", "| explained_variance | 0.195 |\n", "| learning_rate | 0.0003 |\n", "| loss | 596 |\n", "| n_updates | 790 |\n", "| policy_gradient_loss | -0.000152 |\n", "| value_loss | 974 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 163 |\n", "| time/ | |\n", "| fps | 2264 |\n", "| iterations | 81 |\n", "| time_elapsed | 73 |\n", "| total_timesteps | 165888 |\n", "| train/ | |\n", "| approx_kl | 6.312042e-05 |\n", "| clip_fraction | 0.00713 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.159 |\n", "| explained_variance | 0.116 |\n", "| learning_rate | 0.0003 |\n", "| loss | 537 |\n", "| n_updates | 800 |\n", "| policy_gradient_loss | -0.00025 |\n", "| value_loss | 994 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 165 |\n", "| time/ | |\n", "| fps | 2265 |\n", "| iterations | 82 |\n", "| time_elapsed | 74 |\n", "| total_timesteps | 167936 |\n", "| train/ | |\n", "| approx_kl | 0.00083088246 |\n", "| clip_fraction | 0.00654 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.133 |\n", "| explained_variance | 0.187 |\n", "| learning_rate | 0.0003 |\n", "| loss | 416 |\n", "| n_updates | 810 |\n", "| policy_gradient_loss | 0.00012 |\n", "| value_loss | 850 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 167 |\n", "| time/ | |\n", "| fps | 2265 |\n", "| iterations | 83 |\n", "| time_elapsed | 75 |\n", "| total_timesteps | 169984 |\n", "| train/ | |\n", "| approx_kl | 9.744492e-05 |\n", "| clip_fraction | 0.00083 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.122 |\n", "| explained_variance | 0.0716 |\n", "| learning_rate | 0.0003 |\n", "| loss | 514 |\n", "| n_updates | 820 |\n", "| policy_gradient_loss | 3.73e-05 |\n", "| value_loss | 1.04e+03 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 170 |\n", "| time/ | |\n", "| fps | 2265 |\n", "| iterations | 84 |\n", "| time_elapsed | 75 |\n", "| total_timesteps | 172032 |\n", "| train/ | |\n", "| approx_kl | 0.00024220734 |\n", "| clip_fraction | 0.00483 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.136 |\n", "| explained_variance | 0.227 |\n", "| learning_rate | 0.0003 |\n", "| loss | 474 |\n", "| n_updates | 830 |\n", "| policy_gradient_loss | -0.000561 |\n", "| value_loss | 935 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 165 |\n", "| time/ | |\n", "| fps | 2266 |\n", "| iterations | 85 |\n", "| time_elapsed | 76 |\n", "| total_timesteps | 174080 |\n", "| train/ | |\n", "| approx_kl | 0.00034282872 |\n", "| clip_fraction | 0.0064 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.133 |\n", 
"| explained_variance | 0.219 |\n", "| learning_rate | 0.0003 |\n", "| loss | 512 |\n", "| n_updates | 840 |\n", "| policy_gradient_loss | -0.000796 |\n", "| value_loss | 956 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 164 |\n", "| time/ | |\n", "| fps | 2266 |\n", "| iterations | 86 |\n", "| time_elapsed | 77 |\n", "| total_timesteps | 176128 |\n", "| train/ | |\n", "| approx_kl | 9.188682e-05 |\n", "| clip_fraction | 0.00205 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.137 |\n", "| explained_variance | 0.177 |\n", "| learning_rate | 0.0003 |\n", "| loss | 499 |\n", "| n_updates | 850 |\n", "| policy_gradient_loss | 0.000445 |\n", "| value_loss | 947 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 166 |\n", "| time/ | |\n", "| fps | 2266 |\n", "| iterations | 87 |\n", "| time_elapsed | 78 |\n", "| total_timesteps | 178176 |\n", "| train/ | |\n", "| approx_kl | 0.00045906712 |\n", "| clip_fraction | 0.00513 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.152 |\n", "| explained_variance | 0.263 |\n", "| learning_rate | 0.0003 |\n", "| loss | 492 |\n", "| n_updates | 860 |\n", "| policy_gradient_loss | -0.000181 |\n", "| value_loss | 793 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2266 |\n", "| iterations | 88 |\n", "| time_elapsed | 79 |\n", "| total_timesteps | 180224 |\n", "| train/ | |\n", "| approx_kl | 0.00045389446 |\n", "| clip_fraction | 0.00723 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.17 |\n", "| explained_variance | 0.135 |\n", "| learning_rate | 0.0003 |\n", "| loss | 526 |\n", "| n_updates | 870 |\n", "| policy_gradient_loss | -0.000558 |\n", "| value_loss | 961 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 176 |\n", "| time/ | |\n", "| fps | 2267 |\n", "| iterations | 89 |\n", "| time_elapsed | 80 |\n", "| total_timesteps | 182272 |\n", "| train/ | |\n", "| approx_kl | 0.00026226143 |\n", "| clip_fraction | 0.00132 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.175 |\n", "| explained_variance | 0.237 |\n", "| learning_rate | 0.0003 |\n", "| loss | 259 |\n", "| n_updates | 880 |\n", "| policy_gradient_loss | -0.000383 |\n", "| value_loss | 844 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 176 |\n", "| time/ | |\n", "| fps | 2267 |\n", "| iterations | 90 |\n", "| time_elapsed | 81 |\n", "| total_timesteps | 184320 |\n", "| train/ | |\n", "| approx_kl | 0.0004913126 |\n", "| clip_fraction | 0.00737 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.16 |\n", "| explained_variance | 0.31 |\n", "| learning_rate | 0.0003 |\n", "| loss | 476 |\n", "| n_updates | 890 |\n", "| policy_gradient_loss | -0.000867 |\n", "| value_loss | 855 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 179 |\n", "| time/ | |\n", "| fps | 2267 |\n", "| iterations | 91 |\n", "| time_elapsed | 82 |\n", "| total_timesteps | 186368 |\n", 
"| train/ | |\n", "| approx_kl | 0.00075706164 |\n", "| clip_fraction | 0.00869 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.139 |\n", "| explained_variance | 0.18 |\n", "| learning_rate | 0.0003 |\n", "| loss | 566 |\n", "| n_updates | 900 |\n", "| policy_gradient_loss | -0.00113 |\n", "| value_loss | 987 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 182 |\n", "| time/ | |\n", "| fps | 2267 |\n", "| iterations | 92 |\n", "| time_elapsed | 83 |\n", "| total_timesteps | 188416 |\n", "| train/ | |\n", "| approx_kl | 5.1790266e-05 |\n", "| clip_fraction | 0.000293 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.132 |\n", "| explained_variance | 0.161 |\n", "| learning_rate | 0.0003 |\n", "| loss | 517 |\n", "| n_updates | 910 |\n", "| policy_gradient_loss | 7.74e-05 |\n", "| value_loss | 957 |\n", "-------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 178 |\n", "| time/ | |\n", "| fps | 2267 |\n", "| iterations | 93 |\n", "| time_elapsed | 83 |\n", "| total_timesteps | 190464 |\n", "| train/ | |\n", "| approx_kl | 0.00027950367 |\n", "| clip_fraction | 0.00566 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.154 |\n", "| explained_variance | 0.141 |\n", "| learning_rate | 0.0003 |\n", "| loss | 403 |\n", "| n_updates | 920 |\n", "| policy_gradient_loss | -0.000473 |\n", "| value_loss | 989 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 172 |\n", "| time/ | |\n", "| fps | 2268 |\n", "| iterations | 94 |\n", "| time_elapsed | 84 |\n", "| total_timesteps | 192512 |\n", "| train/ | |\n", "| approx_kl | 0.0004199293 |\n", "| clip_fraction | 0.00581 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.136 |\n", "| explained_variance | 0.106 |\n", "| learning_rate | 0.0003 |\n", "| loss | 464 |\n", "| n_updates | 930 |\n", "| policy_gradient_loss | -3.11e-05 |\n", "| value_loss | 1.02e+03 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 162 |\n", "| time/ | |\n", "| fps | 2268 |\n", "| iterations | 95 |\n", "| time_elapsed | 85 |\n", "| total_timesteps | 194560 |\n", "| train/ | |\n", "| approx_kl | 0.00010615683 |\n", "| clip_fraction | 0.00459 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.14 |\n", "| explained_variance | 0.13 |\n", "| learning_rate | 0.0003 |\n", "| loss | 394 |\n", "| n_updates | 940 |\n", "| policy_gradient_loss | -2.78e-05 |\n", "| value_loss | 1.01e+03 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 167 |\n", "| time/ | |\n", "| fps | 2269 |\n", "| iterations | 96 |\n", "| time_elapsed | 86 |\n", "| total_timesteps | 196608 |\n", "| train/ | |\n", "| approx_kl | 4.879624e-05 |\n", "| clip_fraction | 0.00264 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.142 |\n", "| explained_variance | 0.142 |\n", "| learning_rate | 0.0003 |\n", "| loss | 434 |\n", "| n_updates | 950 |\n", "| policy_gradient_loss | -0.000329 |\n", "| value_loss | 957 |\n", "------------------------------------------\n", "-------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 
|\n", "| ep_rew_mean | 170 |\n", "| time/ | |\n", "| fps | 2269 |\n", "| iterations | 97 |\n", "| time_elapsed | 87 |\n", "| total_timesteps | 198656 |\n", "| train/ | |\n", "| approx_kl | 0.00019674914 |\n", "| clip_fraction | 0.00391 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.146 |\n", "| explained_variance | 0.223 |\n", "| learning_rate | 0.0003 |\n", "| loss | 419 |\n", "| n_updates | 960 |\n", "| policy_gradient_loss | -0.000219 |\n", "| value_loss | 910 |\n", "-------------------------------------------\n", "------------------------------------------\n", "| rollout/ | |\n", "| ep_len_mean | 60 |\n", "| ep_rew_mean | 176 |\n", "| time/ | |\n", "| fps | 2270 |\n", "| iterations | 98 |\n", "| time_elapsed | 88 |\n", "| total_timesteps | 200704 |\n", "| train/ | |\n", "| approx_kl | 0.0002947012 |\n", "| clip_fraction | 0.00571 |\n", "| clip_range | 0.2 |\n", "| entropy_loss | -0.149 |\n", "| explained_variance | 0.142 |\n", "| learning_rate | 0.0003 |\n", "| loss | 533 |\n", "| n_updates | 970 |\n", "| policy_gradient_loss | 9.14e-06 |\n", "| value_loss | 1.07e+03 |\n", "------------------------------------------\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 1024, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#Training Model Prediction\n", "model = PPO(\"MlpPolicy\", env, verbose=1)\n", "model.learn(total_timesteps=200000)\n" ] }, { "cell_type": "code", "execution_count": 1025, "metadata": {}, "outputs": [], "source": [ "# Put experience into replay buffer\n", "for _ in range(100):\n", " state = env.reset()\n", " state = tuple(state)\n", " done = False\n", " while not done:\n", " #action = env.action_space.sample()\n", " if np.random.rand() < 0.5:\n", " action = env.action_space.sample()\n", " else:\n", " action, _ = model.predict(state)\n", " action = int(action)\n", " next_state, reward, done, info = env.step(action)\n", " next_state = tuple(next_state)\n", " if reward == 4:\n", " buffer.add((state, action, reward, next_state, done))\n", " state = next_state" ] }, { "cell_type": "code", "execution_count": 1032, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "dIstJPVsDfY-", "outputId": "5e857238-0866-4d85-8d28-d70e305bccfc" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Episode 1, Iteration 1, State: (5.0, 16.0, 118.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 1, Iteration 2, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 5, Action Source: Exploration\n", "Episode 1, Iteration 3, State: (5.0, 17.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 1, Iteration 4, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 11, Action Source: Exploration\n", "Episode 1, Iteration 5, State: (5.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 1, Iteration 6, State: (5.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 17, Action Source: Model Prediction\n", "Episode 1, Iteration 7, State: 
(5.0, 17.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 20, Action Source: Exploration\n", "Episode 1, Iteration 8, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 1, Iteration 9, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 26, Action Source: Exploration\n", "Episode 1, Iteration 10, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 29, Action Source: Model Prediction\n", "Episode 1, Iteration 11, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 32, Action Source: Exploration\n", "Episode 1, Iteration 12, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 34, Action Source: Exploration\n", "Episode 1, Iteration 13, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 1, Iteration 14, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 1, Iteration 15, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 1, Iteration 16, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 44, Action Source: Exploration\n", "Episode 1, Iteration 17, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 135.0, 92.0), Reward: 2, , Cumulative Score: 46, Action Source: Exploration\n", "Episode 1, Iteration 18, State: (5.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (5.0, 19.0, 135.0, 92.0), Reward: 2, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 1, Iteration 19, State: (5.0, 19.0, 135.0, 92.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 49, Action Source: Exploration\n", "Episode 1, Iteration 20, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 1, Iteration 21, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 1, Iteration 22, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 52, Action Source: Model Prediction\n", "Episode 1, Iteration 23, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 53, Action Source: Model Prediction\n", "Episode 1, Iteration 24, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 18.0, 130.0, 
91.0), Reward: 2, , Cumulative Score: 55, Action Source: Exploration\n", "Episode 1, Iteration 25, State: (3.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 18.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 57, Action Source: Exploration\n", "Episode 1, Iteration 26, State: (3.0, 18.0, 130.0, 91.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 58, Action Source: Exploration\n", "Episode 1, Iteration 27, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 18.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 1, Iteration 28, State: (3.0, 18.0, 130.0, 91.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 1, Iteration 29, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 18.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 1, Iteration 30, State: (3.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 18.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 1, Iteration 31, State: (3.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 18.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 1, Iteration 32, State: (3.0, 18.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 1, Iteration 33, State: (3.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 1, Iteration 34, State: (3.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 1, Iteration 35, State: (3.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 1, Iteration 36, State: (3.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 1, Iteration 37, State: (3.0, 17.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 1, Iteration 38, State: (3.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 1, Iteration 39, State: (3.0, 16.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 83, Action Source: Exploration\n", "Episode 1, Iteration 40, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 1, Iteration 41, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 1, Iteration 
42, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 1, Iteration 43, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 1, Iteration 44, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 1, Iteration 45, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 1, Iteration 46, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 1, Iteration 47, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 1, Iteration 48, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 1, Iteration 49, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 1, Iteration 50, State: (3.0, 16.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 1, Iteration 51, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 1, Iteration 52, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 115, Action Source: Exploration\n", "Episode 1, Iteration 53, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 1, Iteration 54, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 122, Action Source: Exploitation\n", "Episode 1, Iteration 55, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 126, Action Source: Exploration\n", "Episode 1, Iteration 56, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 130, Action Source: Exploration\n", "Episode 1, Iteration 57, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 1, Iteration 58, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 137, Action Source: Exploration\n", "Episode 1, Iteration 59, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 
14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 141, Action Source: Exploration\n", "Episode 1, Iteration 60, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 144, Action Source: Exploration\n", "Episode: 1 Best Action: 0 Best evaluation action: 0\n", "Episode: 1 Score: 144 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 2, Iteration 1, State: (4.0, 14.0, 115.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 2, Iteration 2, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 2, Iteration 3, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 2, Iteration 4, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 2, Iteration 5, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 19, Action Source: Exploration\n", "Episode 2, Iteration 6, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 22, Action Source: Exploration\n", "Episode 2, Iteration 7, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 23, Action Source: Exploration\n", "Episode 2, Iteration 8, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 2, Iteration 9, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 25, Action Source: Model Prediction\n", "Episode 2, Iteration 10, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 2, Iteration 11, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 2, Iteration 12, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 32, Action Source: Exploration\n", "Episode 2, Iteration 13, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 35, Action Source: Exploration\n", "Episode 2, Iteration 14, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 2, Iteration 15, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 41, Action Source: Model 
Prediction\n", "Episode 2, Iteration 16, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 2, Iteration 17, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 47, Action Source: Exploration\n", "Episode 2, Iteration 18, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 2, Iteration 19, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 53, Action Source: Exploration\n", "Episode 2, Iteration 20, State: (4.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 2, Iteration 21, State: (4.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 2, Iteration 22, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 2, Iteration 23, State: (4.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 67, Action Source: Exploration\n", "Episode 2, Iteration 24, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 2, Iteration 25, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 74, Action Source: Exploration\n", "Episode 2, Iteration 26, State: (4.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 78, Action Source: Exploitation\n", "Episode 2, Iteration 27, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 82, Action Source: Exploration\n", "Episode 2, Iteration 28, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 2, Iteration 29, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 2, Iteration 30, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 2, Iteration 31, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 2, Iteration 32, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 2, Iteration 33, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation 
Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 2, Iteration 34, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 2, Iteration 35, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 2, Iteration 36, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 2, Iteration 37, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 2, Iteration 38, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 109, Action Source: Exploration\n", "Episode 2, Iteration 39, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 2, Iteration 40, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 2, Iteration 41, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 2, Iteration 42, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 125, Action Source: Exploration\n", "Episode 2, Iteration 43, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 129, Action Source: Exploration\n", "Episode 2, Iteration 44, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 2, Iteration 45, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 137, Action Source: Exploitation\n", "Episode 2, Iteration 46, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 141, Action Source: Exploitation\n", "Episode 2, Iteration 47, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 145, Action Source: Model Prediction\n", "Episode 2, Iteration 48, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 147, Action Source: Exploration\n", "Episode 2, Iteration 49, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode 2, Iteration 50, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 
151, Action Source: Model Prediction\n", "Episode 2, Iteration 51, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 153, Action Source: Exploration\n", "Episode 2, Iteration 52, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 154, Action Source: Exploration\n", "Episode 2, Iteration 53, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 156, Action Source: Exploration\n", "Episode 2, Iteration 54, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 158, Action Source: Model Prediction\n", "Episode 2, Iteration 55, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 160, Action Source: Model Prediction\n", "Episode 2, Iteration 56, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 162, Action Source: Model Prediction\n", "Episode 2, Iteration 57, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 164, Action Source: Exploration\n", "Episode 2, Iteration 58, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 166, Action Source: Exploration\n", "Episode 2, Iteration 59, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 168, Action Source: Exploration\n", "Episode 2, Iteration 60, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 170, Action Source: Model Prediction\n", "Episode: 2 Best Action: 0 Best evaluation action: 2\n", "Episode: 2 Score: 170 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 3, Iteration 1, State: (4.0, 15.0, 124.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 3, Iteration 2, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 3, Iteration 3, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 3, Iteration 4, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 3, Iteration 5, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 3, Iteration 6, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 3, Iteration 
7, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 3, Iteration 8, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 3, Iteration 9, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 3, Iteration 10, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 30, Action Source: Exploration\n", "Episode 3, Iteration 11, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 33, Action Source: Exploration\n", "Episode 3, Iteration 12, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 36, Action Source: Exploration\n", "Episode 3, Iteration 13, State: (5.0, 17.0, 135.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 3, Iteration 14, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 3, Iteration 15, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 3, Iteration 16, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 44, Action Source: Exploration\n", "Episode 3, Iteration 17, State: (3.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 46, Action Source: Model Prediction\n", "Episode 3, Iteration 18, State: (3.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 3, Iteration 19, State: (3.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 51, Action Source: Exploration\n", "Episode 3, Iteration 20, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 3, Iteration 21, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 3, Iteration 22, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 3, Iteration 23, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 3, Iteration 24, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 
87.0), Reward: 4, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 3, Iteration 25, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 3, Iteration 26, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 75, Action Source: Exploration\n", "Episode 3, Iteration 27, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 3, Iteration 28, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 3, Iteration 29, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 80, Action Source: Exploration\n", "Episode 3, Iteration 30, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 3, Iteration 31, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 3, Iteration 32, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 3, Iteration 33, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 3, Iteration 34, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 88, Action Source: Exploration\n", "Episode 3, Iteration 35, State: (4.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 3, Iteration 36, State: (4.0, 14.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 3, Iteration 37, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 96, Action Source: Exploration\n", "Episode 3, Iteration 38, State: (5.0, 16.0, 140.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 145.0, 92.0), Reward: 3, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 3, Iteration 39, State: (5.0, 17.0, 145.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 145.0, 92.0), Reward: 3, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 3, Iteration 40, State: (5.0, 17.0, 145.0, 92.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 150.0, 93.0), Reward: 2, , Cumulative Score: 104, Action Source: Exploration\n", "Episode 3, Iteration 41, State: (5.0, 18.0, 150.0, 93.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 145.0, 92.0), Reward: 1, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 3, Iteration 42, 
State: (4.0, 17.0, 145.0, 92.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 107, Action Source: Exploration\n", "Episode 3, Iteration 43, State: (3.0, 16.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 3, Iteration 44, State: (3.0, 16.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 3, Iteration 45, State: (3.0, 16.0, 140.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 113, Action Source: Exploration\n", "Episode 3, Iteration 46, State: (3.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 3, Iteration 47, State: (3.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 3, Iteration 48, State: (3.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 3, Iteration 49, State: (3.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 3, Iteration 50, State: (3.0, 15.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 124, Action Source: Exploration\n", "Episode 3, Iteration 51, State: (3.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 127, Action Source: Exploration\n", "Episode 3, Iteration 52, State: (3.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 130, Action Source: Exploration\n", "Episode 3, Iteration 53, State: (3.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 3, Iteration 54, State: (3.0, 14.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 137, Action Source: Exploration\n", "Episode 3, Iteration 55, State: (3.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 141, Action Source: Exploration\n", "Episode 3, Iteration 56, State: (3.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 145, Action Source: Model Prediction\n", "Episode 3, Iteration 57, State: (3.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode 3, Iteration 58, State: (3.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 153, Action Source: Exploration\n", "Episode 3, Iteration 59, State: (3.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next 
State: (3.0, 13.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 157, Action Source: Model Prediction\n", "Episode 3, Iteration 60, State: (3.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 161, Action Source: Exploitation\n", "Episode: 3 Best Action: 0 Best evaluation action: 0\n", "Episode: 3 Score: 161 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 4, Iteration 1, State: (5.0, 14.0, 119.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 88.0), Reward: 1, , Cumulative Score: 1, Action Source: Exploration\n", "Episode 4, Iteration 2, State: (5.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 88.0), Reward: 1, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 4, Iteration 3, State: (5.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 88.0), Reward: 1, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 4, Iteration 4, State: (5.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 88.0), Reward: 1, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 4, Iteration 5, State: (5.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 88.0), Reward: 1, , Cumulative Score: 5, Action Source: Model Prediction\n", "Episode 4, Iteration 6, State: (5.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 4, Iteration 7, State: (5.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 7, Action Source: Model Prediction\n", "Episode 4, Iteration 8, State: (5.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 4, Iteration 9, State: (5.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 11, Action Source: Exploration\n", "Episode 4, Iteration 10, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 14, Action Source: Exploration\n", "Episode 4, Iteration 11, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 17, Action Source: Exploration\n", "Episode 4, Iteration 12, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 4, Iteration 13, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 23, Action Source: Exploration\n", "Episode 4, Iteration 14, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 4, Iteration 15, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 29, Action 
Source: Exploration\n", "Episode 4, Iteration 16, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 4, Iteration 17, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 34, Action Source: Exploration\n", "Episode 4, Iteration 18, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 4, Iteration 19, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 4, Iteration 20, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 4, Iteration 21, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 4, Iteration 22, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 45, Action Source: Exploration\n", "Episode 4, Iteration 23, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 4, Iteration 24, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 4, Iteration 25, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 54, Action Source: Exploration\n", "Episode 4, Iteration 26, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 4, Iteration 27, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 4, Iteration 28, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 4, Iteration 29, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 66, Action Source: Exploration\n", "Episode 4, Iteration 30, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 4, Iteration 31, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 4, Iteration 32, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 4, Iteration 33, State: (4.0, 17.0, 135.0, 91.0), 
Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 75, Action Source: Exploration\n", "Episode 4, Iteration 34, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 76, Action Source: Exploration\n", "Episode 4, Iteration 35, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 78, Action Source: Exploration\n", "Episode 4, Iteration 36, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 80, Action Source: Exploration\n", "Episode 4, Iteration 37, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 4, Iteration 38, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 86, Action Source: Exploration\n", "Episode 4, Iteration 39, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 4, Iteration 40, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 4, Iteration 41, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 4, Iteration 42, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 4, Iteration 43, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 101, Action Source: Exploration\n", "Episode 4, Iteration 44, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 4, Iteration 45, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 107, Action Source: Exploration\n", "Episode 4, Iteration 46, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 4, Iteration 47, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 4, Iteration 48, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 4, Iteration 49, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 120, Action Source: Exploration\n", "Episode 4, Iteration 50, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative 
Score: 124, Action Source: Model Prediction\n", "Episode 4, Iteration 51, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 4, Iteration 52, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode 4, Iteration 53, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 136, Action Source: Model Prediction\n", "Episode 4, Iteration 54, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 140, Action Source: Model Prediction\n", "Episode 4, Iteration 55, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 144, Action Source: Model Prediction\n", "Episode 4, Iteration 56, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 148, Action Source: Model Prediction\n", "Episode 4, Iteration 57, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 152, Action Source: Exploitation\n", "Episode 4, Iteration 58, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 155, Action Source: Exploitation\n", "Episode 4, Iteration 59, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 158, Action Source: Model Prediction\n", "Episode 4, Iteration 60, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 161, Action Source: Model Prediction\n", "Episode: 4 Best Action: 0 Best evaluation action: 0\n", "Episode: 4 Score: 161 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 5, Iteration 1, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 5, Iteration 2, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 5, Iteration 3, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 12, Action Source: Exploitation\n", "Episode 5, Iteration 4, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 16, Action Source: Exploration\n", "Episode 5, Iteration 5, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 20, Action Source: Exploration\n", "Episode 5, Iteration 6, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 24, Action Source: Model 
Prediction\n", "Episode 5, Iteration 7, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 28, Action Source: Exploration\n", "Episode 5, Iteration 8, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 32, Action Source: Exploitation\n", "Episode 5, Iteration 9, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 5, Iteration 10, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 37, Action Source: Exploitation\n", "Episode 5, Iteration 11, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 5, Iteration 12, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 39, Action Source: Exploration\n", "Episode 5, Iteration 13, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 5, Iteration 14, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 5, Iteration 15, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 5, Iteration 16, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 5, Iteration 17, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 47, Action Source: Exploration\n", "Episode 5, Iteration 18, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 5, Iteration 19, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 55, Action Source: Exploitation\n", "Episode 5, Iteration 20, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 59, Action Source: Exploration\n", "Episode 5, Iteration 21, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 5, Iteration 22, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 5, Iteration 23, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 89.0), Reward: 1, , Cumulative Score: 62, Action Source: Exploration\n", "Episode 5, Iteration 24, State: (5.0, 15.0, 135.0, 89.0), Agent Action: 1, Evaluation Action 2, 
Next State: (5.0, 15.0, 135.0, 89.0), Reward: 1, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 5, Iteration 25, State: (5.0, 15.0, 135.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 89.0), Reward: 1, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 5, Iteration 26, State: (5.0, 15.0, 135.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 89.0), Reward: 1, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 5, Iteration 27, State: (5.0, 15.0, 135.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 89.0), Reward: 1, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 5, Iteration 28, State: (5.0, 15.0, 135.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 89.0), Reward: 1, , Cumulative Score: 67, Action Source: Exploration\n", "Episode 5, Iteration 29, State: (5.0, 15.0, 135.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 70, Action Source: Exploration\n", "Episode 5, Iteration 30, State: (4.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 5, Iteration 31, State: (4.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 76, Action Source: Exploration\n", "Episode 5, Iteration 32, State: (4.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 5, Iteration 33, State: (4.0, 14.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 87.0), Reward: 4, , Cumulative Score: 83, Action Source: Exploration\n", "Episode 5, Iteration 34, State: (3.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 87.0), Reward: 4, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 5, Iteration 35, State: (3.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 87.0), Reward: 4, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 5, Iteration 36, State: (3.0, 13.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 5, Iteration 37, State: (4.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 5, Iteration 38, State: (4.0, 14.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 87.0), Reward: 4, , Cumulative Score: 101, Action Source: Exploration\n", "Episode 5, Iteration 39, State: (3.0, 13.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 104, Action Source: Exploitation\n", "Episode 5, Iteration 40, State: (4.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 107, Action Source: Exploration\n", "Episode 5, Iteration 41, State: (4.0, 14.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 87.0), Reward: 4, , Cumulative Score: 111, Action Source: 
Exploration\n", "Episode 5, Iteration 42, State: (3.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 87.0), Reward: 4, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 5, Iteration 43, State: (3.0, 13.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 119, Action Source: Exploration\n", "Episode 5, Iteration 44, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 123, Action Source: Exploration\n", "Episode 5, Iteration 45, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 5, Iteration 46, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 5, Iteration 47, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 128, Action Source: Exploration\n", "Episode 5, Iteration 48, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 5, Iteration 49, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 5, Iteration 50, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 132, Action Source: Exploration\n", "Episode 5, Iteration 51, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 5, Iteration 52, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 136, Action Source: Model Prediction\n", "Episode 5, Iteration 53, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 138, Action Source: Model Prediction\n", "Episode 5, Iteration 54, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 140, Action Source: Exploration\n", "Episode 5, Iteration 55, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode 5, Iteration 56, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 144, Action Source: Model Prediction\n", "Episode 5, Iteration 57, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 146, Action Source: Model Prediction\n", "Episode 5, Iteration 58, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 148, Action Source: Model Prediction\n", "Episode 5, Iteration 59, State: (3.0, 11.0, 
120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 150, Action Source: Exploration\n", "Episode 5, Iteration 60, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 152, Action Source: Exploration\n", "Episode: 5 Best Action: 0 Best evaluation action: 2\n", "Episode: 5 Score: 152 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 6, Iteration 1, State: (4.0, 15.0, 117.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 6, Iteration 2, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 6, Iteration 3, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 11, Action Source: Exploration\n", "Episode 6, Iteration 4, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 6, Iteration 5, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 16, Action Source: Exploration\n", "Episode 6, Iteration 6, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 6, Iteration 7, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 6, Iteration 8, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 6, Iteration 9, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 6, Iteration 10, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 6, Iteration 11, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 6, Iteration 12, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 6, Iteration 13, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 6, Iteration 14, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 6, Iteration 15, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next 
State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 45, Action Source: Exploration\n", "Episode 6, Iteration 16, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 48, Action Source: Exploration\n", "Episode 6, Iteration 17, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 51, Action Source: Exploration\n", "Episode 6, Iteration 18, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 6, Iteration 19, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 6, Iteration 20, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 6, Iteration 21, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 6, Iteration 22, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 66, Action Source: Exploration\n", "Episode 6, Iteration 23, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 6, Iteration 24, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 72, Action Source: Exploration\n", "Episode 6, Iteration 25, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 6, Iteration 26, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 6, Iteration 27, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 6, Iteration 28, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 6, Iteration 29, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 6, Iteration 30, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 90, Action Source: Exploration\n", "Episode 6, Iteration 31, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 6, Iteration 32, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 96, Action Source: 
Exploration\n", "Episode 6, Iteration 33, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 6, Iteration 34, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 6, Iteration 35, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 6, Iteration 36, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 6, Iteration 37, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 6, Iteration 38, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 114, Action Source: Exploration\n", "Episode 6, Iteration 39, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 117, Action Source: Exploration\n", "Episode 6, Iteration 40, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 6, Iteration 41, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 6, Iteration 42, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 6, Iteration 43, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 129, Action Source: Exploration\n", "Episode 6, Iteration 44, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode 6, Iteration 45, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 135, Action Source: Exploration\n", "Episode 6, Iteration 46, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 138, Action Source: Model Prediction\n", "Episode 6, Iteration 47, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 141, Action Source: Exploration\n", "Episode 6, Iteration 48, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 144, Action Source: Model Prediction\n", "Episode 6, Iteration 49, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 147, Action Source: Model Prediction\n", "Episode 6, Iteration 50, State: (5.0, 17.0, 135.0, 
90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 150, Action Source: Model Prediction\n", "Episode 6, Iteration 51, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 153, Action Source: Exploration\n", "Episode 6, Iteration 52, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 156, Action Source: Exploration\n", "Episode 6, Iteration 53, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 159, Action Source: Exploration\n", "Episode 6, Iteration 54, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 162, Action Source: Model Prediction\n", "Episode 6, Iteration 55, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 165, Action Source: Exploration\n", "Episode 6, Iteration 56, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 168, Action Source: Exploration\n", "Episode 6, Iteration 57, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 171, Action Source: Exploration\n", "Episode 6, Iteration 58, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 174, Action Source: Exploration\n", "Episode 6, Iteration 59, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 177, Action Source: Model Prediction\n", "Episode 6, Iteration 60, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 180, Action Source: Exploration\n", "Episode: 6 Best Action: 0 Best evaluation action: 2\n", "Episode: 6 Score: 180 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 7, Iteration 1, State: (4.0, 16.0, 121.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 7, Iteration 2, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 7, Iteration 3, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 7, Iteration 4, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 7, Iteration 5, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 7, Iteration 6, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: 
(3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 19, Action Source: Exploration\n", "Episode 7, Iteration 7, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 7, Iteration 8, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 7, Iteration 9, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 31, Action Source: Exploration\n", "Episode 7, Iteration 10, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 34, Action Source: Exploration\n", "Episode 7, Iteration 11, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 37, Action Source: Model Prediction\n", "Episode 7, Iteration 12, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 7, Iteration 13, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 43, Action Source: Exploration\n", "Episode 7, Iteration 14, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 45, Action Source: Exploration\n", "Episode 7, Iteration 15, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 46, Action Source: Exploration\n", "Episode 7, Iteration 16, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 48, Action Source: Exploration\n", "Episode 7, Iteration 17, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 7, Iteration 18, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 52, Action Source: Exploration\n", "Episode 7, Iteration 19, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 7, Iteration 20, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 56, Action Source: Exploration\n", "Episode 7, Iteration 21, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 7, Iteration 22, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 7, Iteration 23, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 62, Action Source: Exploration\n", "Episode 7, 
Iteration 24, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 7, Iteration 25, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 7, Iteration 26, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 68, Action Source: Exploration\n", "Episode 7, Iteration 27, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 70, Action Source: Model Prediction\n", "Episode 7, Iteration 28, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 72, Action Source: Exploration\n", "Episode 7, Iteration 29, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 7, Iteration 30, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 76, Action Source: Exploration\n", "Episode 7, Iteration 31, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 7, Iteration 32, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 80, Action Source: Exploration\n", "Episode 7, Iteration 33, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 7, Iteration 34, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 7, Iteration 35, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 84, Action Source: Exploration\n", "Episode 7, Iteration 36, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 86, Action Source: Exploration\n", "Episode 7, Iteration 37, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 7, Iteration 38, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 7, Iteration 39, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 7, Iteration 40, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 7, Iteration 41, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), 
Reward: 2, , Cumulative Score: 95, Action Source: Exploration\n", "Episode 7, Iteration 42, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 97, Action Source: Exploration\n", "Episode 7, Iteration 43, State: (3.0, 6.0, 120.0, 79.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 78.0), Reward: 2, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 7, Iteration 44, State: (3.0, 5.0, 120.0, 78.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 78.0), Reward: 2, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 7, Iteration 45, State: (3.0, 5.0, 120.0, 78.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 77.0), Reward: 2, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 7, Iteration 46, State: (3.0, 4.0, 120.0, 77.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 5.0, 125.0, 78.0), Reward: 1, , Cumulative Score: 104, Action Source: Exploration\n", "Episode 7, Iteration 47, State: (4.0, 5.0, 125.0, 78.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 77.0), Reward: 2, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 7, Iteration 48, State: (3.0, 4.0, 120.0, 77.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 5.0, 125.0, 78.0), Reward: 1, , Cumulative Score: 107, Action Source: Exploration\n", "Episode 7, Iteration 49, State: (4.0, 5.0, 125.0, 78.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 5.0, 125.0, 78.0), Reward: 1, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 7, Iteration 50, State: (4.0, 5.0, 125.0, 78.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 5.0, 125.0, 78.0), Reward: 1, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 7, Iteration 51, State: (4.0, 5.0, 125.0, 78.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 77.0), Reward: 2, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 7, Iteration 52, State: (3.0, 4.0, 120.0, 77.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 77.0), Reward: 2, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 7, Iteration 53, State: (3.0, 4.0, 120.0, 77.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 3.0, 120.0, 76.0), Reward: 2, , Cumulative Score: 115, Action Source: Exploration\n", "Episode 7, Iteration 54, State: (3.0, 3.0, 120.0, 76.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 3.0, 120.0, 76.0), Reward: 2, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 7, Iteration 55, State: (3.0, 3.0, 120.0, 76.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 3.0, 120.0, 76.0), Reward: 2, , Cumulative Score: 119, Action Source: Exploration\n", "Episode 7, Iteration 56, State: (3.0, 3.0, 120.0, 76.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 4.0, 125.0, 77.0), Reward: 1, , Cumulative Score: 120, Action Source: Exploration\n", "Episode 7, Iteration 57, State: (4.0, 4.0, 125.0, 77.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 4.0, 125.0, 77.0), Reward: 1, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 7, Iteration 58, State: (4.0, 4.0, 125.0, 77.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 4.0, 125.0, 77.0), Reward: 1, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 7, Iteration 59, State: (4.0, 4.0, 125.0, 
77.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 4.0, 125.0, 77.0), Reward: 1, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 7, Iteration 60, State: (4.0, 4.0, 125.0, 77.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 5.0, 130.0, 78.0), Reward: 1, , Cumulative Score: 124, Action Source: Exploration\n", "Episode: 7 Best Action: 0 Best evaluation action: 0\n", "Episode: 7 Score: 124 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 8, Iteration 1, State: (4.0, 15.0, 123.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 8, Iteration 2, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 8, Iteration 3, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 8, Iteration 4, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 13, Action Source: Exploration\n", "Episode 8, Iteration 5, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 17, Action Source: Exploration\n", "Episode 8, Iteration 6, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 19, Action Source: Exploration\n", "Episode 8, Iteration 7, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 8, Iteration 8, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 8, Iteration 9, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 27, Action Source: Exploration\n", "Episode 8, Iteration 10, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 30, Action Source: Exploration\n", "Episode 8, Iteration 11, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 8, Iteration 12, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 8, Iteration 13, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 8, Iteration 14, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 40, Action Source: Exploration\n", "Episode 8, Iteration 15, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 
125.0, 86.0), Reward: 1, , Cumulative Score: 41, Action Source: Model Prediction\n", "Episode 8, Iteration 16, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 8, Iteration 17, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 46, Action Source: Exploration\n", "Episode 8, Iteration 18, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 8, Iteration 19, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 8, Iteration 20, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 52, Action Source: Exploration\n", "Episode 8, Iteration 21, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 8, Iteration 22, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 56, Action Source: Exploration\n", "Episode 8, Iteration 23, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 8, Iteration 24, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 8, Iteration 25, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 8, Iteration 26, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 62, Action Source: Model Prediction\n", "Episode 8, Iteration 27, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 8, Iteration 28, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 64, Action Source: Exploration\n", "Episode 8, Iteration 29, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 8, Iteration 30, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 8, Iteration 31, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 67, Action Source: Exploration\n", "Episode 8, Iteration 32, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 68, Action Source: Exploration\n", 
"Episode 8, Iteration 33, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 8, Iteration 34, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 70, Action Source: Model Prediction\n", "Episode 8, Iteration 35, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 72, Action Source: Exploration\n", "Episode 8, Iteration 36, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 8, Iteration 37, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 76, Action Source: Exploration\n", "Episode 8, Iteration 38, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 8, Iteration 39, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 80, Action Source: Exploration\n", "Episode 8, Iteration 40, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 8, Iteration 41, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 8, Iteration 42, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 8, Iteration 43, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 88, Action Source: Exploration\n", "Episode 8, Iteration 44, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 90, Action Source: Exploration\n", "Episode 8, Iteration 45, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 9.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 91, Action Source: Exploration\n", "Episode 8, Iteration 46, State: (4.0, 9.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 9.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 8, Iteration 47, State: (4.0, 9.0, 125.0, 82.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 10.0, 130.0, 83.0), Reward: 1, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 8, Iteration 48, State: (5.0, 10.0, 130.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 11.0, 135.0, 84.0), Reward: 1, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 8, Iteration 49, State: (5.0, 11.0, 135.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 11.0, 135.0, 84.0), Reward: 1, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 8, Iteration 50, State: (5.0, 11.0, 135.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: 
(4.0, 10.0, 130.0, 83.0), Reward: 1, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 8, Iteration 51, State: (4.0, 10.0, 130.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 130.0, 83.0), Reward: 1, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 8, Iteration 52, State: (4.0, 10.0, 130.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 130.0, 83.0), Reward: 1, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 8, Iteration 53, State: (4.0, 10.0, 130.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 130.0, 83.0), Reward: 1, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 8, Iteration 54, State: (4.0, 10.0, 130.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 11.0, 135.0, 84.0), Reward: 1, , Cumulative Score: 100, Action Source: Exploration\n", "Episode 8, Iteration 55, State: (5.0, 11.0, 135.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 12.0, 140.0, 85.0), Reward: 1, , Cumulative Score: 101, Action Source: Exploration\n", "Episode 8, Iteration 56, State: (5.0, 12.0, 140.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 145.0, 86.0), Reward: 1, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 8, Iteration 57, State: (5.0, 13.0, 145.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 150.0, 87.0), Reward: 1, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 8, Iteration 58, State: (5.0, 14.0, 150.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 150.0, 87.0), Reward: 1, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 8, Iteration 59, State: (5.0, 14.0, 150.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 145.0, 86.0), Reward: 1, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 8, Iteration 60, State: (4.0, 13.0, 145.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 145.0, 86.0), Reward: 1, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode: 8 Best Action: 0 Best evaluation action: 2\n", "Episode: 8 Score: 106 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 9, Iteration 1, State: (4.0, 15.0, 123.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 9, Iteration 2, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 9, Iteration 3, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 9, Iteration 4, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 9, Iteration 5, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 16, Action Source: Exploration\n", "Episode 9, Iteration 6, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 
20, Action Source: Model Prediction\n", "Episode 9, Iteration 7, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 9, Iteration 8, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 28, Action Source: Model Prediction\n", "Episode 9, Iteration 9, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 9, Iteration 10, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 34, Action Source: Exploration\n", "Episode 9, Iteration 11, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 9, Iteration 12, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 9, Iteration 13, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 45, Action Source: Exploitation\n", "Episode 9, Iteration 14, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 48, Action Source: Exploration\n", "Episode 9, Iteration 15, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 9, Iteration 16, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 9, Iteration 17, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 9, Iteration 18, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 9, Iteration 19, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 62, Action Source: Exploration\n", "Episode 9, Iteration 20, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 9, Iteration 21, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 66, Action Source: Exploration\n", "Episode 9, Iteration 22, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 9, Iteration 23, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 70, Action Source: Exploration\n", "Episode 9, Iteration 24, State: (3.0, 11.0, 120.0, 84.0), 
Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 9, Iteration 25, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 74, Action Source: Exploration\n", "Episode 9, Iteration 26, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 9, Iteration 27, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 77, Action Source: Exploration\n", "Episode 9, Iteration 28, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 9, Iteration 29, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 9, Iteration 30, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 9, Iteration 31, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 9, Iteration 32, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 9, Iteration 33, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 9, Iteration 34, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 87.0), Reward: 1, , Cumulative Score: 84, Action Source: Exploration\n", "Episode 9, Iteration 35, State: (5.0, 14.0, 135.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 87.0), Reward: 1, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 9, Iteration 36, State: (5.0, 14.0, 135.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 9, Iteration 37, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 9, Iteration 38, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 9, Iteration 39, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 9, Iteration 40, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 9, Iteration 41, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 
86.0), Reward: 1, , Cumulative Score: 91, Action Source: Exploration\n", "Episode 9, Iteration 42, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 87.0), Reward: 1, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 9, Iteration 43, State: (5.0, 14.0, 135.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 87.0), Reward: 1, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 9, Iteration 44, State: (5.0, 14.0, 135.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 140.0, 88.0), Reward: 1, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 9, Iteration 45, State: (5.0, 15.0, 140.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 140.0, 88.0), Reward: 1, , Cumulative Score: 95, Action Source: Exploration\n", "Episode 9, Iteration 46, State: (5.0, 15.0, 140.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 87.0), Reward: 2, , Cumulative Score: 97, Action Source: Exploration\n", "Episode 9, Iteration 47, State: (4.0, 14.0, 135.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 87.0), Reward: 2, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 9, Iteration 48, State: (4.0, 14.0, 135.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 87.0), Reward: 2, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 9, Iteration 49, State: (4.0, 14.0, 135.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 87.0), Reward: 2, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 9, Iteration 50, State: (4.0, 14.0, 135.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 87.0), Reward: 2, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 9, Iteration 51, State: (4.0, 14.0, 135.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 140.0, 88.0), Reward: 1, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 9, Iteration 52, State: (5.0, 15.0, 140.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 145.0, 89.0), Reward: 2, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 9, Iteration 53, State: (5.0, 16.0, 145.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 145.0, 89.0), Reward: 2, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 9, Iteration 54, State: (5.0, 16.0, 145.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 150.0, 90.0), Reward: 3, , Cumulative Score: 113, Action Source: Exploration\n", "Episode 9, Iteration 55, State: (5.0, 17.0, 150.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 150.0, 90.0), Reward: 3, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 9, Iteration 56, State: (5.0, 17.0, 150.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 155.0, 91.0), Reward: 3, , Cumulative Score: 119, Action Source: Exploration\n", "Episode 9, Iteration 57, State: (5.0, 18.0, 155.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 155.0, 91.0), Reward: 3, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 9, Iteration 58, State: (5.0, 18.0, 155.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 155.0, 91.0), Reward: 3, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 
9, Iteration 59, State: (5.0, 18.0, 155.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 155.0, 91.0), Reward: 3, , Cumulative Score: 128, Action Source: Exploration\n", "Episode 9, Iteration 60, State: (5.0, 18.0, 155.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 155.0, 91.0), Reward: 3, , Cumulative Score: 131, Action Source: Exploration\n", "Episode: 9 Best Action: 0 Best evaluation action: 2\n", "Episode: 9 Score: 131 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 10, Iteration 1, State: (3.0, 16.0, 119.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 1, Action Source: Exploration\n", "Episode 10, Iteration 2, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 10, Iteration 3, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 10, Iteration 4, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 10, Iteration 5, State: (5.0, 18.0, 130.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 7, Action Source: Exploration\n", "Episode 10, Iteration 6, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 10, Iteration 7, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 10, Iteration 8, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 10, Iteration 9, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 10, Iteration 10, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 14, Action Source: Exploration\n", "Episode 10, Iteration 11, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 16, Action Source: Exploration\n", "Episode 10, Iteration 12, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 10, Iteration 13, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 10, Iteration 14, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 23, Action Source: Exploration\n", "Episode 10, Iteration 15, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 0, 
Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 25, Action Source: Model Prediction\n", "Episode 10, Iteration 16, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 10, Iteration 17, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 33, Action Source: Exploration\n", "Episode 10, Iteration 18, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 37, Action Source: Exploration\n", "Episode 10, Iteration 19, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 10, Iteration 20, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 45, Action Source: Exploration\n", "Episode 10, Iteration 21, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 49, Action Source: Model Prediction\n", "Episode 10, Iteration 22, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 53, Action Source: Exploration\n", "Episode 10, Iteration 23, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 10, Iteration 24, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 61, Action Source: Model Prediction\n", "Episode 10, Iteration 25, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 10, Iteration 26, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 10, Iteration 27, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 71, Action Source: Exploration\n", "Episode 10, Iteration 28, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 73, Action Source: Exploration\n", "Episode 10, Iteration 29, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 74, Action Source: Exploration\n", "Episode 10, Iteration 30, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 10, Iteration 31, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 76, Action Source: Exploration\n", "Episode 10, Iteration 32, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 
77, Action Source: Model Prediction\n", "Episode 10, Iteration 33, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 10, Iteration 34, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 10, Iteration 35, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 80, Action Source: Exploration\n", "Episode 10, Iteration 36, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 10, Iteration 37, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 10, Iteration 38, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 83, Action Source: Exploration\n", "Episode 10, Iteration 39, State: (5.0, 14.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 84, Action Source: Exploration\n", "Episode 10, Iteration 40, State: (5.0, 14.0, 135.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 140.0, 89.0), Reward: 1, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 10, Iteration 41, State: (5.0, 15.0, 140.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 140.0, 89.0), Reward: 1, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 10, Iteration 42, State: (5.0, 15.0, 140.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 10, Iteration 43, State: (4.0, 14.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 10, Iteration 44, State: (4.0, 14.0, 135.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 130.0, 87.0), Reward: 4, , Cumulative Score: 96, Action Source: Exploration\n", "Episode 10, Iteration 45, State: (3.0, 13.0, 130.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 10, Iteration 46, State: (4.0, 14.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 10, Iteration 47, State: (4.0, 14.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 10, Iteration 48, State: (4.0, 14.0, 135.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 140.0, 89.0), Reward: 1, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 10, Iteration 49, State: (5.0, 15.0, 140.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 140.0, 89.0), Reward: 1, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 10, Iteration 50, State: (5.0, 
15.0, 140.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 10, Iteration 51, State: (4.0, 14.0, 135.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 130.0, 87.0), Reward: 4, , Cumulative Score: 114, Action Source: Exploration\n", "Episode 10, Iteration 52, State: (3.0, 13.0, 130.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 117, Action Source: Exploration\n", "Episode 10, Iteration 53, State: (4.0, 14.0, 135.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 130.0, 87.0), Reward: 4, , Cumulative Score: 121, Action Source: Exploration\n", "Episode 10, Iteration 54, State: (3.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 130.0, 87.0), Reward: 4, , Cumulative Score: 125, Action Source: Exploration\n", "Episode 10, Iteration 55, State: (3.0, 13.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 129, Action Source: Exploration\n", "Episode 10, Iteration 56, State: (3.0, 12.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 130, Action Source: Exploration\n", "Episode 10, Iteration 57, State: (4.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 10, Iteration 58, State: (4.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode 10, Iteration 59, State: (4.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 10, Iteration 60, State: (4.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode: 10 Best Action: 0 Best evaluation action: 0\n", "Episode: 10 Score: 134 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 11, Iteration 1, State: (5.0, 16.0, 121.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 11, Iteration 2, State: (5.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 11, Iteration 3, State: (5.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 11, Iteration 4, State: (5.0, 16.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 11, Iteration 5, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 11, Iteration 6, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation 
Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 19, Action Source: Exploration\n", "Episode 11, Iteration 7, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 23, Action Source: Exploitation\n", "Episode 11, Iteration 8, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 27, Action Source: Exploitation\n", "Episode 11, Iteration 9, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 31, Action Source: Model Prediction\n", "Episode 11, Iteration 10, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 35, Action Source: Exploitation\n", "Episode 11, Iteration 11, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 11, Iteration 12, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 42, Action Source: Exploration\n", "Episode 11, Iteration 13, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 46, Action Source: Exploration\n", "Episode 11, Iteration 14, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 50, Action Source: Exploration\n", "Episode 11, Iteration 15, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 52, Action Source: Exploration\n", "Episode 11, Iteration 16, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 11, Iteration 17, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 11, Iteration 18, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 11, Iteration 19, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 11, Iteration 20, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 61, Action Source: Model Prediction\n", "Episode 11, Iteration 21, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 62, Action Source: Model Prediction\n", "Episode 11, Iteration 22, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 11, Iteration 23, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 
64, Action Source: Exploration\n", "Episode 11, Iteration 24, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 11, Iteration 25, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 66, Action Source: Exploration\n", "Episode 11, Iteration 26, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 11, Iteration 27, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 68, Action Source: Exploration\n", "Episode 11, Iteration 28, State: (5.0, 14.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 11, Iteration 29, State: (5.0, 14.0, 135.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 70, Action Source: Exploration\n", "Episode 11, Iteration 30, State: (4.0, 13.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 74, Action Source: Exploration\n", "Episode 11, Iteration 31, State: (3.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 11, Iteration 32, State: (3.0, 12.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 80, Action Source: Exploration\n", "Episode 11, Iteration 33, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 11, Iteration 34, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 11, Iteration 35, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 86, Action Source: Exploration\n", "Episode 11, Iteration 36, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 11, Iteration 37, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 11, Iteration 38, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 11, Iteration 39, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 11, Iteration 40, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 11, Iteration 41, State: (4.0, 
12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 11, Iteration 42, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 11, Iteration 43, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 11, Iteration 44, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 11, Iteration 45, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 96, Action Source: Exploration\n", "Episode 11, Iteration 46, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 11, Iteration 47, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 11, Iteration 48, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 11, Iteration 49, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 11, Iteration 50, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 101, Action Source: Exploration\n", "Episode 11, Iteration 51, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 11, Iteration 52, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 11, Iteration 53, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 11, Iteration 54, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 11, Iteration 55, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 11, Iteration 56, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 107, Action Source: Exploration\n", "Episode 11, Iteration 57, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 11, Iteration 58, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next 
State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 11, Iteration 59, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 11, Iteration 60, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 111, Action Source: Exploration\n", "Episode: 11 Best Action: 0 Best evaluation action: 2\n", "Episode: 11 Score: 111 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 12, Iteration 1, State: (3.0, 16.0, 124.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 12, Iteration 2, State: (3.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 12, Iteration 3, State: (3.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 12, Iteration 4, State: (3.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 12, Iteration 5, State: (3.0, 16.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 11, Action Source: Exploration\n", "Episode 12, Iteration 6, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 12, Iteration 7, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 19, Action Source: Model Prediction\n", "Episode 12, Iteration 8, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 22, Action Source: Exploration\n", "Episode 12, Iteration 9, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 25, Action Source: Model Prediction\n", "Episode 12, Iteration 10, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 28, Action Source: Model Prediction\n", "Episode 12, Iteration 11, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 31, Action Source: Exploration\n", "Episode 12, Iteration 12, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 35, Action Source: Exploration\n", "Episode 12, Iteration 13, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 12, Iteration 14, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , 
Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 12, Iteration 15, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 12, Iteration 16, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 12, Iteration 17, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 54, Action Source: Exploitation\n", "Episode 12, Iteration 18, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 12, Iteration 19, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 12, Iteration 20, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 12, Iteration 21, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 12, Iteration 22, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 12, Iteration 23, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 12, Iteration 24, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 12, Iteration 25, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 12, Iteration 26, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 12, Iteration 27, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 12, Iteration 28, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 12, Iteration 29, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 80, Action Source: Exploration\n", "Episode 12, Iteration 30, State: (4.0, 11.0, 125.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 12, Iteration 31, State: (4.0, 11.0, 125.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 83, Action Source: Exploration\n", 
"Episode 12, Iteration 32, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 12, Iteration 33, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 12, Iteration 34, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 12, Iteration 35, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 12, Iteration 36, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 12, Iteration 37, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 12, Iteration 38, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 12, Iteration 39, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 12, Iteration 40, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 12, Iteration 41, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 12, Iteration 42, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 12, Iteration 43, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 12, Iteration 44, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 12, Iteration 45, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 12, Iteration 46, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 12, Iteration 47, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 12, Iteration 48, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 12, Iteration 49, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 2, 
Evaluation Action 2, Next State: (5.0, 9.0, 130.0, 82.0), Reward: 1, , Cumulative Score: 112, Action Source: Exploration\n", "Episode 12, Iteration 50, State: (5.0, 9.0, 130.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 9.0, 130.0, 82.0), Reward: 1, , Cumulative Score: 113, Action Source: Exploration\n", "Episode 12, Iteration 51, State: (5.0, 9.0, 130.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 9.0, 130.0, 82.0), Reward: 1, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 12, Iteration 52, State: (5.0, 9.0, 130.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 9.0, 130.0, 82.0), Reward: 1, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 12, Iteration 53, State: (5.0, 9.0, 130.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 12, Iteration 54, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 12, Iteration 55, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 12, Iteration 56, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 12, Iteration 57, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 12, Iteration 58, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 122, Action Source: Exploration\n", "Episode 12, Iteration 59, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 12, Iteration 60, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode: 12 Best Action: 0 Best evaluation action: 0\n", "Episode: 12 Score: 126 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 13, Iteration 1, State: (4.0, 16.0, 121.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 13, Iteration 2, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 13, Iteration 3, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 13, Iteration 4, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 13, Iteration 5, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 
125.0, 90.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 13, Iteration 6, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 13, Iteration 7, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 18, Action Source: Exploitation\n", "Episode 13, Iteration 8, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 13, Iteration 9, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 23, Action Source: Exploration\n", "Episode 13, Iteration 10, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 27, Action Source: Exploration\n", "Episode 13, Iteration 11, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 31, Action Source: Exploitation\n", "Episode 13, Iteration 12, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 35, Action Source: Exploration\n", "Episode 13, Iteration 13, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 39, Action Source: Exploration\n", "Episode 13, Iteration 14, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 13, Iteration 15, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 46, Action Source: Exploitation\n", "Episode 13, Iteration 16, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 50, Action Source: Exploration\n", "Episode 13, Iteration 17, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 53, Action Source: Exploration\n", "Episode 13, Iteration 18, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 13, Iteration 19, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 59, Action Source: Exploration\n", "Episode 13, Iteration 20, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 13, Iteration 21, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 13, Iteration 22, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 71, Action Source: Exploration\n", "Episode 13, 
Iteration 23, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 13, Iteration 24, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 13, Iteration 25, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 83, Action Source: Exploitation\n", "Episode 13, Iteration 26, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 86, Action Source: Exploitation\n", "Episode 13, Iteration 27, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 13, Iteration 28, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 13, Iteration 29, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 13, Iteration 30, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 13, Iteration 31, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 13, Iteration 32, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 13, Iteration 33, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 13, Iteration 34, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 13, Iteration 35, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 13, Iteration 36, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 13, Iteration 37, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 13, Iteration 38, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 112, Action Source: Exploration\n", "Episode 13, Iteration 39, State: (5.0, 17.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 13, Iteration 40, State: (5.0, 17.0, 140.0, 91.0), Agent Action: 2, 
Evaluation Action 2, Next State: (5.0, 18.0, 145.0, 92.0), Reward: 3, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 13, Iteration 41, State: (5.0, 18.0, 145.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 140.0, 91.0), Reward: 1, , Cumulative Score: 119, Action Source: Exploration\n", "Episode 13, Iteration 42, State: (4.0, 17.0, 140.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 13, Iteration 43, State: (3.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 123, Action Source: Exploration\n", "Episode 13, Iteration 44, State: (3.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 13, Iteration 45, State: (3.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 13, Iteration 46, State: (3.0, 16.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 129, Action Source: Exploration\n", "Episode 13, Iteration 47, State: (3.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 13, Iteration 48, State: (3.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 13, Iteration 49, State: (3.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 135, Action Source: Exploration\n", "Episode 13, Iteration 50, State: (3.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 13, Iteration 51, State: (3.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 139, Action Source: Exploration\n", "Episode 13, Iteration 52, State: (3.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 141, Action Source: Model Prediction\n", "Episode 13, Iteration 53, State: (3.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 145, Action Source: Exploration\n", "Episode 13, Iteration 54, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode 13, Iteration 55, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 153, Action Source: Exploitation\n", "Episode 13, Iteration 56, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 157, Action Source: Model Prediction\n", "Episode 13, Iteration 57, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 88.0), 
Reward: 4, , Cumulative Score: 161, Action Source: Model Prediction\n", "Episode 13, Iteration 58, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 165, Action Source: Model Prediction\n", "Episode 13, Iteration 59, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 169, Action Source: Model Prediction\n", "Episode 13, Iteration 60, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 173, Action Source: Model Prediction\n", "Episode: 13 Best Action: 0 Best evaluation action: 0\n", "Episode: 13 Score: 173 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 14, Iteration 1, State: (3.0, 15.0, 118.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 14, Iteration 2, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 7, Action Source: Exploration\n", "Episode 14, Iteration 3, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 11, Action Source: Model Prediction\n", "Episode 14, Iteration 4, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 14, Iteration 5, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 14, Iteration 6, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 22, Action Source: Exploration\n", "Episode 14, Iteration 7, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 14, Iteration 8, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 14, Iteration 9, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 34, Action Source: Exploitation\n", "Episode 14, Iteration 10, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 14, Iteration 11, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 14, Iteration 12, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 14, Iteration 13, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 
47, Action Source: Model Prediction\n", "Episode 14, Iteration 14, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 14, Iteration 15, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 53, Action Source: Model Prediction\n", "Episode 14, Iteration 16, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 55, Action Source: Exploration\n", "Episode 14, Iteration 17, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 57, Action Source: Exploration\n", "Episode 14, Iteration 18, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 59, Action Source: Exploration\n", "Episode 14, Iteration 19, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 14, Iteration 20, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 14, Iteration 21, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 66, Action Source: Exploration\n", "Episode 14, Iteration 22, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 14, Iteration 23, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 14, Iteration 24, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 14, Iteration 25, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 14, Iteration 26, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 14, Iteration 27, State: (5.0, 18.0, 140.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 83, Action Source: Exploration\n", "Episode 14, Iteration 28, State: (4.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 14, Iteration 29, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 14, Iteration 30, State: (4.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 14, Iteration 31, State: (4.0, 
17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 91, Action Source: Exploration\n", "Episode 14, Iteration 32, State: (4.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 14, Iteration 33, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 95, Action Source: Exploration\n", "Episode 14, Iteration 34, State: (4.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 14, Iteration 35, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 14, Iteration 36, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 101, Action Source: Exploration\n", "Episode 14, Iteration 37, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 14, Iteration 38, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 14, Iteration 39, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 14, Iteration 40, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 14, Iteration 41, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 14, Iteration 42, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 14, Iteration 43, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 122, Action Source: Exploration\n", "Episode 14, Iteration 44, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 126, Action Source: Exploitation\n", "Episode 14, Iteration 45, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 14, Iteration 46, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 14, Iteration 47, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 136, Action Source: Exploration\n", "Episode 14, Iteration 48, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: 
(3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 140, Action Source: Exploration\n", "Episode 14, Iteration 49, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 144, Action Source: Model Prediction\n", "Episode 14, Iteration 50, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 148, Action Source: Exploration\n", "Episode 14, Iteration 51, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 150, Action Source: Exploration\n", "Episode 14, Iteration 52, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 152, Action Source: Model Prediction\n", "Episode 14, Iteration 53, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 154, Action Source: Model Prediction\n", "Episode 14, Iteration 54, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 156, Action Source: Model Prediction\n", "Episode 14, Iteration 55, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 158, Action Source: Exploration\n", "Episode 14, Iteration 56, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 160, Action Source: Model Prediction\n", "Episode 14, Iteration 57, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 162, Action Source: Model Prediction\n", "Episode 14, Iteration 58, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 166, Action Source: Exploration\n", "Episode 14, Iteration 59, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 170, Action Source: Model Prediction\n", "Episode 14, Iteration 60, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 174, Action Source: Model Prediction\n", "Episode: 14 Best Action: 0 Best evaluation action: 0\n", "Episode: 14 Score: 174 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 15, Iteration 1, State: (3.0, 16.0, 115.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 15, Iteration 2, State: (3.0, 16.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 15, Iteration 3, State: (3.0, 16.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 15, Iteration 4, State: (3.0, 16.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 
88.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 15, Iteration 5, State: (3.0, 16.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 15, Iteration 6, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 15, Iteration 7, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 15, Iteration 8, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 15, Iteration 9, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 15, Iteration 10, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 15, Iteration 11, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 33, Action Source: Exploration\n", "Episode 15, Iteration 12, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 15, Iteration 13, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 15, Iteration 14, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 15, Iteration 15, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 15, Iteration 16, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 49, Action Source: Exploration\n", "Episode 15, Iteration 17, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 53, Action Source: Model Prediction\n", "Episode 15, Iteration 18, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 15, Iteration 19, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 59, Action Source: Exploration\n", "Episode 15, Iteration 20, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 61, Action Source: Model Prediction\n", "Episode 15, Iteration 21, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 63, Action Source: Model 
Prediction\n", "Episode 15, Iteration 22, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 67, Action Source: Exploration\n", "Episode 15, Iteration 23, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 15, Iteration 24, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 15, Iteration 25, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 15, Iteration 26, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 83, Action Source: Exploitation\n", "Episode 15, Iteration 27, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 87, Action Source: Exploitation\n", "Episode 15, Iteration 28, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 15, Iteration 29, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 95, Action Source: Exploitation\n", "Episode 15, Iteration 30, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 15, Iteration 31, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 15, Iteration 32, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 15, Iteration 33, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 107, Action Source: Exploration\n", "Episode 15, Iteration 34, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 15, Iteration 35, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 111, Action Source: Exploration\n", "Episode 15, Iteration 36, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 15, Iteration 37, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 115, Action Source: Exploration\n", "Episode 15, Iteration 38, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 15, Iteration 39, State: (3.0, 13.0, 
120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 121, Action Source: Exploration\n", "Episode 15, Iteration 40, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 123, Action Source: Exploration\n", "Episode 15, Iteration 41, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 15, Iteration 42, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 15, Iteration 43, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 131, Action Source: Exploration\n", "Episode 15, Iteration 44, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 15, Iteration 45, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 134, Action Source: Exploration\n", "Episode 15, Iteration 46, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 135, Action Source: Model Prediction\n", "Episode 15, Iteration 47, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 136, Action Source: Model Prediction\n", "Episode 15, Iteration 48, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 137, Action Source: Exploration\n", "Episode 15, Iteration 49, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 138, Action Source: Model Prediction\n", "Episode 15, Iteration 50, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 139, Action Source: Model Prediction\n", "Episode 15, Iteration 51, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 141, Action Source: Exploration\n", "Episode 15, Iteration 52, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 144, Action Source: Exploration\n", "Episode 15, Iteration 53, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 147, Action Source: Model Prediction\n", "Episode 15, Iteration 54, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 150, Action Source: Model Prediction\n", "Episode 15, Iteration 55, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 153, Action Source: Exploration\n", "Episode 15, Iteration 56, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: 
(3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 156, Action Source: Exploration\n", "Episode 15, Iteration 57, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 158, Action Source: Exploration\n", "Episode 15, Iteration 58, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 160, Action Source: Model Prediction\n", "Episode 15, Iteration 59, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 162, Action Source: Model Prediction\n", "Episode 15, Iteration 60, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 164, Action Source: Exploration\n", "Episode: 15 Best Action: 0 Best evaluation action: 0\n", "Episode: 15 Score: 164 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0 Best Action Source: Exploration\n", "Episode 16, Iteration 1, State: (4.0, 16.0, 121.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 16, Iteration 2, State: (4.0, 16.0, 120.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 91.0), Reward: 3, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 16, Iteration 3, State: (5.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 91.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 16, Iteration 4, State: (5.0, 17.0, 125.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 16, Iteration 5, State: (4.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 16, Iteration 6, State: (4.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 16, Iteration 7, State: (4.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 16, Iteration 8, State: (4.0, 16.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 23, Action Source: Exploration\n", "Episode 16, Iteration 9, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 25, Action Source: Model Prediction\n", "Episode 16, Iteration 10, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 16, Iteration 11, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 31, Action Source: Exploration\n", "Episode 16, Iteration 12, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , 
Cumulative Score: 35, Action Source: Exploration\n", "Episode 16, Iteration 13, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 39, Action Source: Exploration\n", "Episode 16, Iteration 14, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 16, Iteration 15, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 46, Action Source: Exploration\n", "Episode 16, Iteration 16, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 49, Action Source: Model Prediction\n", "Episode 16, Iteration 17, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 52, Action Source: Model Prediction\n", "Episode 16, Iteration 18, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 16, Iteration 19, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 16, Iteration 20, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 62, Action Source: Exploration\n", "Episode 16, Iteration 21, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 16, Iteration 22, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 16, Iteration 23, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 16, Iteration 24, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 75, Action Source: Exploration\n", "Episode 16, Iteration 25, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 16, Iteration 26, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 16, Iteration 27, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 16, Iteration 28, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 16, Iteration 29, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 16, 
Iteration 30, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 96, Action Source: Exploration\n", "Episode 16, Iteration 31, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 16, Iteration 32, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 16, Iteration 33, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 16, Iteration 34, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 16, Iteration 35, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 112, Action Source: Exploration\n", "Episode 16, Iteration 36, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 115, Action Source: Exploration\n", "Episode 16, Iteration 37, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 16, Iteration 38, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 122, Action Source: Exploration\n", "Episode 16, Iteration 39, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 16, Iteration 40, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 16, Iteration 41, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 134, Action Source: Exploration\n", "Episode 16, Iteration 42, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 138, Action Source: Exploration\n", "Episode 16, Iteration 43, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode 16, Iteration 44, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 146, Action Source: Exploitation\n", "Episode 16, Iteration 45, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 150, Action Source: Exploitation\n", "Episode 16, Iteration 46, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 154, Action Source: Exploration\n", "Episode 16, Iteration 47, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 2, 
Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 155, Action Source: Exploration\n", "Episode 16, Iteration 48, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 156, Action Source: Model Prediction\n", "Episode 16, Iteration 49, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 157, Action Source: Model Prediction\n", "Episode 16, Iteration 50, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 158, Action Source: Exploration\n", "Episode 16, Iteration 51, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 159, Action Source: Model Prediction\n", "Episode 16, Iteration 52, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 160, Action Source: Exploration\n", "Episode 16, Iteration 53, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 161, Action Source: Model Prediction\n", "Episode 16, Iteration 54, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 162, Action Source: Model Prediction\n", "Episode 16, Iteration 55, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 163, Action Source: Exploration\n", "Episode 16, Iteration 56, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 164, Action Source: Model Prediction\n", "Episode 16, Iteration 57, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 168, Action Source: Exploration\n", "Episode 16, Iteration 58, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 172, Action Source: Model Prediction\n", "Episode 16, Iteration 59, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 176, Action Source: Exploitation\n", "Episode 16, Iteration 60, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 178, Action Source: Exploration\n", "Episode: 16 Best Action: 0 Best evaluation action: 0\n", "Episode: 16 Score: 178 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 17, Iteration 1, State: (5.0, 14.0, 123.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 17, Iteration 2, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 17, Iteration 3, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next 
State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 17, Iteration 4, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 17, Iteration 5, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 17, Iteration 6, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 17, Iteration 7, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 14, Action Source: Exploration\n", "Episode 17, Iteration 8, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 17, Iteration 9, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 17, Iteration 10, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 17, Iteration 11, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 22, Action Source: Model Prediction\n", "Episode 17, Iteration 12, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 17, Iteration 13, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 17, Iteration 14, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 28, Action Source: Exploration\n", "Episode 17, Iteration 15, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 30, Action Source: Exploration\n", "Episode 17, Iteration 16, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 32, Action Source: Exploration\n", "Episode 17, Iteration 17, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 17, Iteration 18, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 36, Action Source: Exploration\n", "Episode 17, Iteration 19, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 17, Iteration 20, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 40, 
Action Source: Exploration\n", "Episode 17, Iteration 21, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 17, Iteration 22, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 17, Iteration 23, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 46, Action Source: Model Prediction\n", "Episode 17, Iteration 24, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 17, Iteration 25, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 52, Action Source: Exploration\n", "Episode 17, Iteration 26, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 56, Action Source: Exploration\n", "Episode 17, Iteration 27, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 17, Iteration 28, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 64, Action Source: Exploration\n", "Episode 17, Iteration 29, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 67, Action Source: Exploration\n", "Episode 17, Iteration 30, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 70, Action Source: Model Prediction\n", "Episode 17, Iteration 31, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 17, Iteration 32, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 17, Iteration 33, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 17, Iteration 34, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 17, Iteration 35, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 17, Iteration 36, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 17, Iteration 37, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 17, Iteration 38, State: 
(4.0, 11.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 17, Iteration 39, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 17, Iteration 40, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 17, Iteration 41, State: (5.0, 12.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 17, Iteration 42, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 17, Iteration 43, State: (5.0, 12.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 95, Action Source: Exploration\n", "Episode 17, Iteration 44, State: (5.0, 12.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 17, Iteration 45, State: (5.0, 12.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 17, Iteration 46, State: (5.0, 12.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 17, Iteration 47, State: (5.0, 12.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 17, Iteration 48, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 17, Iteration 49, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 17, Iteration 50, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 17, Iteration 51, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 17, Iteration 52, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 112, Action Source: Exploration\n", "Episode 17, Iteration 53, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 17, Iteration 54, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 17, Iteration 55, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, 
Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 17, Iteration 56, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 122, Action Source: Exploration\n", "Episode 17, Iteration 57, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 17, Iteration 58, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 126, Action Source: Exploration\n", "Episode 17, Iteration 59, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 17, Iteration 60, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 130, Action Source: Exploration\n", "Episode: 17 Best Action: 0 Best evaluation action: 2\n", "Episode: 17 Score: 130 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 18, Iteration 1, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 18, Iteration 2, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 18, Iteration 3, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 18, Iteration 4, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 18, Iteration 5, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 11, Action Source: Model Prediction\n", "Episode 18, Iteration 6, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 14, Action Source: Exploration\n", "Episode 18, Iteration 7, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 17, Action Source: Model Prediction\n", "Episode 18, Iteration 8, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 18, Iteration 9, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 25, Action Source: Model Prediction\n", "Episode 18, Iteration 10, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 18, Iteration 11, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 
87.0), Reward: 4, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 18, Iteration 12, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 36, Action Source: Exploration\n", "Episode 18, Iteration 13, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 18, Iteration 14, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 18, Iteration 15, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 18, Iteration 16, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 18, Iteration 17, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 18, Iteration 18, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 49, Action Source: Model Prediction\n", "Episode 18, Iteration 19, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 18, Iteration 20, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 53, Action Source: Model Prediction\n", "Episode 18, Iteration 21, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 18, Iteration 22, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 18, Iteration 23, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 18, Iteration 24, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 61, Action Source: Model Prediction\n", "Episode 18, Iteration 25, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 18, Iteration 26, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 18, Iteration 27, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 68, Action Source: Exploration\n", "Episode 18, Iteration 28, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 71, Action 
Source: Exploration\n", "Episode 18, Iteration 29, State: (5.0, 18.0, 140.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 18, Iteration 30, State: (4.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 18, Iteration 31, State: (4.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 77, Action Source: Exploration\n", "Episode 18, Iteration 32, State: (4.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 18, Iteration 33, State: (4.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 18, Iteration 34, State: (4.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 83, Action Source: Exploration\n", "Episode 18, Iteration 35, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 18, Iteration 36, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 18, Iteration 37, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 18, Iteration 38, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 18, Iteration 39, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 18, Iteration 40, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 18, Iteration 41, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 18, Iteration 42, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 18, Iteration 43, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 18, Iteration 44, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 18, Iteration 45, State: (3.0, 16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 18, Iteration 46, State: (3.0, 15.0, 
125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 18, Iteration 47, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 18, Iteration 48, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 115, Action Source: Exploration\n", "Episode 18, Iteration 49, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 18, Iteration 50, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 121, Action Source: Exploration\n", "Episode 18, Iteration 51, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 18, Iteration 52, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 18, Iteration 53, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 18, Iteration 54, State: (5.0, 17.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 18, Iteration 55, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 136, Action Source: Exploration\n", "Episode 18, Iteration 56, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 139, Action Source: Exploration\n", "Episode 18, Iteration 57, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode 18, Iteration 58, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 145, Action Source: Exploration\n", "Episode 18, Iteration 59, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 148, Action Source: Exploration\n", "Episode 18, Iteration 60, State: (4.0, 16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 151, Action Source: Exploration\n", "Episode: 18 Best Action: 0 Best evaluation action: 2\n", "Episode: 18 Score: 151 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 19, Iteration 1, State: (3.0, 16.0, 122.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 19, Iteration 2, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, 
Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 19, Iteration 3, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 19, Iteration 4, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 19, Iteration 5, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 19, Iteration 6, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 19, Iteration 7, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 19, Iteration 8, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 19, Iteration 9, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 27, Action Source: Exploration\n", "Episode 19, Iteration 10, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 30, Action Source: Exploration\n", "Episode 19, Iteration 11, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 19, Iteration 12, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 19, Iteration 13, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 19, Iteration 14, State: (5.0, 17.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 19, Iteration 15, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 19, Iteration 16, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 19, Iteration 17, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 19, Iteration 18, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 53, Action Source: Exploration\n", "Episode 19, Iteration 19, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , 
Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 19, Iteration 20, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 19, Iteration 21, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 19, Iteration 22, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 19, Iteration 23, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 19, Iteration 24, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 19, Iteration 25, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 71, Action Source: Exploration\n", "Episode 19, Iteration 26, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 19, Iteration 27, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 75, Action Source: Exploration\n", "Episode 19, Iteration 28, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 19, Iteration 29, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 19, Iteration 30, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 84, Action Source: Exploration\n", "Episode 19, Iteration 31, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 19, Iteration 32, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 19, Iteration 33, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 19, Iteration 34, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 19, Iteration 35, State: (4.0, 12.0, 125.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 19, Iteration 36, State: (4.0, 12.0, 125.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 96, Action Source: Exploration\n", "Episode 19, Iteration 
37, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 19, Iteration 38, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 19, Iteration 39, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 19, Iteration 40, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 19, Iteration 41, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 19, Iteration 42, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 19, Iteration 43, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 19, Iteration 44, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 112, Action Source: Exploration\n", "Episode 19, Iteration 45, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 113, Action Source: Exploration\n", "Episode 19, Iteration 46, State: (4.0, 10.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 19, Iteration 47, State: (4.0, 10.0, 125.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 19, Iteration 48, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 19, Iteration 49, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 19, Iteration 50, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 19, Iteration 51, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 19, Iteration 52, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 19, Iteration 53, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 19, Iteration 54, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, 
Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 19, Iteration 55, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 132, Action Source: Exploration\n", "Episode 19, Iteration 56, State: (3.0, 8.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 19, Iteration 57, State: (3.0, 8.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 136, Action Source: Exploration\n", "Episode 19, Iteration 58, State: (3.0, 8.0, 120.0, 80.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 138, Action Source: Exploration\n", "Episode 19, Iteration 59, State: (3.0, 7.0, 120.0, 79.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 140, Action Source: Exploration\n", "Episode 19, Iteration 60, State: (3.0, 7.0, 120.0, 79.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode: 19 Best Action: 0 Best evaluation action: 0\n", "Episode: 19 Score: 142 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0 Best Action Source: Model Prediction\n", "Episode 20, Iteration 1, State: (4.0, 15.0, 119.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 20, Iteration 2, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 20, Iteration 3, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 20, Iteration 4, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 20, Iteration 5, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 20, Iteration 6, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 20, Iteration 7, State: (5.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 20, Iteration 8, State: (5.0, 16.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 20, Iteration 9, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 20, Iteration 10, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 
89.0), Reward: 3, , Cumulative Score: 30, Action Source: Exploration\n", "Episode 20, Iteration 11, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 34, Action Source: Exploration\n", "Episode 20, Iteration 12, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 20, Iteration 13, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 42, Action Source: Exploration\n", "Episode 20, Iteration 14, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 46, Action Source: Exploitation\n", "Episode 20, Iteration 15, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 20, Iteration 16, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 20, Iteration 17, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 20, Iteration 18, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 20, Iteration 19, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 20, Iteration 20, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 20, Iteration 21, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 70, Action Source: Exploration\n", "Episode 20, Iteration 22, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 20, Iteration 23, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 20, Iteration 24, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 20, Iteration 25, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 20, Iteration 26, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 20, Iteration 27, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 88, Action Source: Model 
Prediction\n", "Episode 20, Iteration 28, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 91, Action Source: Exploration\n", "Episode 20, Iteration 29, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 20, Iteration 30, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 97, Action Source: Exploration\n", "Episode 20, Iteration 31, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 20, Iteration 32, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 20, Iteration 33, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 20, Iteration 34, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 20, Iteration 35, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 20, Iteration 36, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 20, Iteration 37, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 20, Iteration 38, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 20, Iteration 39, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 20, Iteration 40, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 128, Action Source: Exploration\n", "Episode 20, Iteration 41, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode 20, Iteration 42, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 136, Action Source: Exploitation\n", "Episode 20, Iteration 43, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 140, Action Source: Model Prediction\n", "Episode 20, Iteration 44, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 144, Action Source: Exploration\n", "Episode 20, Iteration 45, 
State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 148, Action Source: Model Prediction\n", "Episode 20, Iteration 46, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 152, Action Source: Exploration\n", "Episode 20, Iteration 47, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 156, Action Source: Model Prediction\n", "Episode 20, Iteration 48, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 160, Action Source: Model Prediction\n", "Episode 20, Iteration 49, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 164, Action Source: Exploration\n", "Episode 20, Iteration 50, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 168, Action Source: Model Prediction\n", "Episode 20, Iteration 51, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 172, Action Source: Model Prediction\n", "Episode 20, Iteration 52, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 173, Action Source: Exploitation\n", "Episode 20, Iteration 53, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 174, Action Source: Exploration\n", "Episode 20, Iteration 54, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 175, Action Source: Model Prediction\n", "Episode 20, Iteration 55, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 176, Action Source: Model Prediction\n", "Episode 20, Iteration 56, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 177, Action Source: Model Prediction\n", "Episode 20, Iteration 57, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 178, Action Source: Model Prediction\n", "Episode 20, Iteration 58, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 179, Action Source: Model Prediction\n", "Episode 20, Iteration 59, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 180, Action Source: Model Prediction\n", "Episode 20, Iteration 60, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 181, Action Source: Exploration\n", "Episode: 20 Best Action: 0 Best evaluation action: 2\n", "Episode: 20 Score: 181 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 21, Iteration 1, State: 
(4.0, 16.0, 123.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 21, Iteration 2, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 21, Iteration 3, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 21, Iteration 4, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 21, Iteration 5, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 21, Iteration 6, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 21, Iteration 7, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 21, Iteration 8, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 25, Action Source: Exploration\n", "Episode 21, Iteration 9, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 29, Action Source: Model Prediction\n", "Episode 21, Iteration 10, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 21, Iteration 11, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 37, Action Source: Model Prediction\n", "Episode 21, Iteration 12, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 41, Action Source: Model Prediction\n", "Episode 21, Iteration 13, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 43, Action Source: Exploration\n", "Episode 21, Iteration 14, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 47, Action Source: Exploration\n", "Episode 21, Iteration 15, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 21, Iteration 16, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 21, Iteration 17, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 21, Iteration 18, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next 
State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 61, Action Source: Model Prediction\n", "Episode 21, Iteration 19, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 21, Iteration 20, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 21, Iteration 21, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 68, Action Source: Exploration\n", "Episode 21, Iteration 22, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 21, Iteration 23, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 21, Iteration 24, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 76, Action Source: Exploration\n", "Episode 21, Iteration 25, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 77, Action Source: Exploration\n", "Episode 21, Iteration 26, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 21, Iteration 27, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 21, Iteration 28, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 21, Iteration 29, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 21, Iteration 30, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 83, Action Source: Exploration\n", "Episode 21, Iteration 31, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 84, Action Source: Exploration\n", "Episode 21, Iteration 32, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 88.0), Reward: 2, , Cumulative Score: 86, Action Source: Exploration\n", "Episode 21, Iteration 33, State: (5.0, 16.0, 135.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 88, Action Source: Exploration\n", "Episode 21, Iteration 34, State: (4.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 21, Iteration 35, State: (4.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 92, Action 
Source: Model Prediction\n", "Episode 21, Iteration 36, State: (4.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 21, Iteration 37, State: (4.0, 15.0, 130.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 88.0), Reward: 2, , Cumulative Score: 96, Action Source: Exploration\n", "Episode 21, Iteration 38, State: (5.0, 16.0, 135.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 21, Iteration 39, State: (4.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 21, Iteration 40, State: (4.0, 15.0, 130.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 88.0), Reward: 2, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 21, Iteration 41, State: (5.0, 16.0, 135.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 140.0, 89.0), Reward: 2, , Cumulative Score: 104, Action Source: Exploration\n", "Episode 21, Iteration 42, State: (5.0, 17.0, 140.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 107, Action Source: Exploration\n", "Episode 21, Iteration 43, State: (4.0, 16.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 21, Iteration 44, State: (4.0, 16.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 21, Iteration 45, State: (4.0, 16.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 21, Iteration 46, State: (4.0, 16.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 135.0, 88.0), Reward: 3, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 21, Iteration 47, State: (4.0, 16.0, 135.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 130.0, 87.0), Reward: 3, , Cumulative Score: 122, Action Source: Exploration\n", "Episode 21, Iteration 48, State: (3.0, 15.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 126, Action Source: Exploration\n", "Episode 21, Iteration 49, State: (3.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 21, Iteration 50, State: (3.0, 14.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 21, Iteration 51, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 136, Action Source: Exploration\n", "Episode 21, Iteration 52, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 139, Action Source: Exploration\n", "Episode 21, Iteration 53, State: (3.0, 
12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode 21, Iteration 54, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 145, Action Source: Model Prediction\n", "Episode 21, Iteration 55, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 148, Action Source: Model Prediction\n", "Episode 21, Iteration 56, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 151, Action Source: Model Prediction\n", "Episode 21, Iteration 57, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 153, Action Source: Exploration\n", "Episode 21, Iteration 58, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 155, Action Source: Exploration\n", "Episode 21, Iteration 59, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 157, Action Source: Exploration\n", "Episode 21, Iteration 60, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 159, Action Source: Exploration\n", "Episode: 21 Best Action: 0 Best evaluation action: 0\n", "Episode: 21 Score: 159 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0 Best Action Source: Exploration\n", "Episode 22, Iteration 1, State: (3.0, 16.0, 117.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 22, Iteration 2, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 22, Iteration 3, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 22, Iteration 4, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 7, Action Source: Exploration\n", "Episode 22, Iteration 5, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 22, Iteration 6, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 22, Iteration 7, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 22, Iteration 8, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 11, Action Source: Model Prediction\n", "Episode 22, Iteration 9, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, 
Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 22, Iteration 10, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 22, Iteration 11, State: (5.0, 18.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 22, Iteration 12, State: (5.0, 18.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 22, Iteration 13, State: (5.0, 18.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 22, Iteration 14, State: (5.0, 18.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 27, Action Source: Exploration\n", "Episode 22, Iteration 15, State: (5.0, 18.0, 130.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 28, Action Source: Exploration\n", "Episode 22, Iteration 16, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 29, Action Source: Model Prediction\n", "Episode 22, Iteration 17, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 22, Iteration 18, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 31, Action Source: Model Prediction\n", "Episode 22, Iteration 19, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 32, Action Source: Exploration\n", "Episode 22, Iteration 20, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 91.0), Reward: 1, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 22, Iteration 21, State: (4.0, 17.0, 125.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 35, Action Source: Exploration\n", "Episode 22, Iteration 22, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 37, Action Source: Model Prediction\n", "Episode 22, Iteration 23, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 39, Action Source: Exploration\n", "Episode 22, Iteration 24, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 22, Iteration 25, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 22, Iteration 26, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, 
, Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 22, Iteration 27, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 22, Iteration 28, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 49, Action Source: Model Prediction\n", "Episode 22, Iteration 29, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 53, Action Source: Exploration\n", "Episode 22, Iteration 30, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 22, Iteration 31, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 61, Action Source: Model Prediction\n", "Episode 22, Iteration 32, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 22, Iteration 33, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 22, Iteration 34, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 22, Iteration 35, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 22, Iteration 36, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 81, Action Source: Exploitation\n", "Episode 22, Iteration 37, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 84, Action Source: Exploitation\n", "Episode 22, Iteration 38, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 22, Iteration 39, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 22, Iteration 40, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 22, Iteration 41, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 22, Iteration 42, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 22, Iteration 43, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 106, Action Source: Model 
Prediction\n", "Episode 22, Iteration 44, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 110, Action Source: Exploitation\n", "Episode 22, Iteration 45, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 114, Action Source: Exploitation\n", "Episode 22, Iteration 46, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 118, Action Source: Exploitation\n", "Episode 22, Iteration 47, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 121, Action Source: Exploration\n", "Episode 22, Iteration 48, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 22, Iteration 49, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 22, Iteration 50, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 22, Iteration 51, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 22, Iteration 52, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 141, Action Source: Model Prediction\n", "Episode 22, Iteration 53, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 145, Action Source: Model Prediction\n", "Episode 22, Iteration 54, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 149, Action Source: Exploitation\n", "Episode 22, Iteration 55, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 153, Action Source: Model Prediction\n", "Episode 22, Iteration 56, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 157, Action Source: Model Prediction\n", "Episode 22, Iteration 57, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 161, Action Source: Model Prediction\n", "Episode 22, Iteration 58, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 163, Action Source: Model Prediction\n", "Episode 22, Iteration 59, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 165, Action Source: Model Prediction\n", "Episode 22, Iteration 60, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 167, Action Source: Model Prediction\n", "Episode: 22 Best 
Action: 0 Best evaluation action: 0\n", "Episode: 22 Score: 167 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 23, Iteration 1, State: (5.0, 16.0, 118.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 23, Iteration 2, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 23, Iteration 3, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 7, Action Source: Exploration\n", "Episode 23, Iteration 4, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 23, Iteration 5, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 13, Action Source: Model Prediction\n", "Episode 23, Iteration 6, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 23, Iteration 7, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 19, Action Source: Model Prediction\n", "Episode 23, Iteration 8, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 23, Iteration 9, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 23, Action Source: Exploration\n", "Episode 23, Iteration 10, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 25, Action Source: Model Prediction\n", "Episode 23, Iteration 11, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 23, Iteration 12, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 23, Iteration 13, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 31, Action Source: Model Prediction\n", "Episode 23, Iteration 14, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 23, Iteration 15, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 23, Iteration 16, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 23, Iteration 17, State: (3.0, 14.0, 120.0, 
87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 46, Action Source: Exploration\n", "Episode 23, Iteration 18, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 50, Action Source: Exploration\n", "Episode 23, Iteration 19, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 23, Iteration 20, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 23, Iteration 21, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 62, Action Source: Model Prediction\n", "Episode 23, Iteration 22, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 23, Iteration 23, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 70, Action Source: Exploration\n", "Episode 23, Iteration 24, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 74, Action Source: Exploitation\n", "Episode 23, Iteration 25, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 23, Iteration 26, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 23, Iteration 27, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 23, Iteration 28, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 23, Iteration 29, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 23, Iteration 30, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 23, Iteration 31, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 91, Action Source: Exploration\n", "Episode 23, Iteration 32, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 23, Iteration 33, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 23, Iteration 34, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 
82.0), Reward: 2, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 23, Iteration 35, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 23, Iteration 36, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 23, Iteration 37, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 23, Iteration 38, State: (4.0, 10.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 23, Iteration 39, State: (4.0, 10.0, 125.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 11.0, 130.0, 84.0), Reward: 1, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 23, Iteration 40, State: (5.0, 11.0, 130.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 23, Iteration 41, State: (4.0, 10.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 23, Iteration 42, State: (4.0, 10.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 23, Iteration 43, State: (4.0, 10.0, 125.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 11.0, 130.0, 84.0), Reward: 1, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 23, Iteration 44, State: (5.0, 11.0, 130.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 11.0, 130.0, 84.0), Reward: 1, , Cumulative Score: 109, Action Source: Exploration\n", "Episode 23, Iteration 45, State: (5.0, 11.0, 130.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 23, Iteration 46, State: (4.0, 10.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 23, Iteration 47, State: (4.0, 10.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 23, Iteration 48, State: (4.0, 10.0, 125.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 23, Iteration 49, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 23, Iteration 50, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 23, Iteration 51, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 120, Action Source: 
Exploration\n", "Episode 23, Iteration 52, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 23, Iteration 53, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 124, Action Source: Exploration\n", "Episode 23, Iteration 54, State: (3.0, 7.0, 120.0, 80.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 125, Action Source: Exploration\n", "Episode 23, Iteration 55, State: (4.0, 8.0, 125.0, 81.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 9.0, 130.0, 82.0), Reward: 1, , Cumulative Score: 126, Action Source: Exploration\n", "Episode 23, Iteration 56, State: (5.0, 9.0, 130.0, 82.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 10.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 127, Action Source: Exploration\n", "Episode 23, Iteration 57, State: (5.0, 10.0, 135.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 10.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 23, Iteration 58, State: (5.0, 10.0, 135.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 10.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 23, Iteration 59, State: (5.0, 10.0, 135.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 10.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 23, Iteration 60, State: (5.0, 10.0, 135.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 10.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode: 23 Best Action: 0 Best evaluation action: 2\n", "Episode: 23 Score: 131 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 24, Iteration 1, State: (5.0, 15.0, 123.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 88.0), Reward: 1, , Cumulative Score: 1, Action Source: Exploration\n", "Episode 24, Iteration 2, State: (5.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 88.0), Reward: 1, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 24, Iteration 3, State: (5.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 24, Iteration 4, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 24, Iteration 5, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 24, Iteration 6, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 24, Iteration 7, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 24, Iteration 8, State: (5.0, 
16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 24, Iteration 9, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 19, Action Source: Exploration\n", "Episode 24, Iteration 10, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 24, Iteration 11, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 26, Action Source: Exploitation\n", "Episode 24, Iteration 12, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 29, Action Source: Model Prediction\n", "Episode 24, Iteration 13, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 24, Iteration 14, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 35, Action Source: Model Prediction\n", "Episode 24, Iteration 15, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 24, Iteration 16, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 39, Action Source: Exploration\n", "Episode 24, Iteration 17, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 40, Action Source: Exploration\n", "Episode 24, Iteration 18, State: (5.0, 14.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 24, Iteration 19, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 42, Action Source: Exploration\n", "Episode 24, Iteration 20, State: (5.0, 14.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 24, Iteration 21, State: (5.0, 14.0, 130.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 44, Action Source: Exploration\n", "Episode 24, Iteration 22, State: (5.0, 15.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 24, Iteration 23, State: (5.0, 15.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 46, Action Source: Exploration\n", "Episode 24, Iteration 24, State: (5.0, 15.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 24, Iteration 25, State: (5.0, 15.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 
15.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 24, Iteration 26, State: (5.0, 15.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 49, Action Source: Model Prediction\n", "Episode 24, Iteration 27, State: (5.0, 15.0, 135.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 50, Action Source: Exploration\n", "Episode 24, Iteration 28, State: (5.0, 15.0, 135.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 52, Action Source: Exploration\n", "Episode 24, Iteration 29, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 54, Action Source: Exploration\n", "Episode 24, Iteration 30, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 58, Action Source: Exploration\n", "Episode 24, Iteration 31, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 24, Iteration 32, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 64, Action Source: Exploitation\n", "Episode 24, Iteration 33, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 24, Iteration 34, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 24, Iteration 35, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 24, Iteration 36, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 24, Iteration 37, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 84, Action Source: Exploration\n", "Episode 24, Iteration 38, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 86, Action Source: Exploration\n", "Episode 24, Iteration 39, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 88, Action Source: Exploration\n", "Episode 24, Iteration 40, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 24, Iteration 41, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 24, Iteration 42, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 96, Action Source: 
Exploration\n", "Episode 24, Iteration 43, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 100, Action Source: Exploitation\n", "Episode 24, Iteration 44, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 24, Iteration 45, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 24, Iteration 46, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 24, Iteration 47, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 24, Iteration 48, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 24, Iteration 49, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 120, Action Source: Exploration\n", "Episode 24, Iteration 50, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 24, Iteration 51, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 124, Action Source: Exploration\n", "Episode 24, Iteration 52, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 24, Iteration 53, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 128, Action Source: Exploration\n", "Episode 24, Iteration 54, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 130, Action Source: Exploration\n", "Episode 24, Iteration 55, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode 24, Iteration 56, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 24, Iteration 57, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 136, Action Source: Exploration\n", "Episode 24, Iteration 58, State: (3.0, 9.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 138, Action Source: Exploration\n", "Episode 24, Iteration 59, State: (3.0, 8.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 140, Action Source: Model Prediction\n", "Episode 24, Iteration 60, State: (3.0, 
8.0, 120.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 142, Action Source: Exploration\n", "Episode: 24 Best Action: 0 Best evaluation action: 2\n", "Episode: 24 Score: 142 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 25, Iteration 1, State: (5.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 88.0), Reward: 1, , Cumulative Score: 1, Action Source: Model Prediction\n", "Episode 25, Iteration 2, State: (5.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 25, Iteration 3, State: (4.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 88.0), Reward: 1, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 25, Iteration 4, State: (5.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 88.0), Reward: 1, , Cumulative Score: 5, Action Source: Exploration\n", "Episode 25, Iteration 5, State: (5.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 7, Action Source: Exploration\n", "Episode 25, Iteration 6, State: (4.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 25, Iteration 7, State: (4.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 11, Action Source: Model Prediction\n", "Episode 25, Iteration 8, State: (4.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 13, Action Source: Model Prediction\n", "Episode 25, Iteration 9, State: (4.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 25, Iteration 10, State: (4.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 19, Action Source: Model Prediction\n", "Episode 25, Iteration 11, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 25, Iteration 12, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 25, Iteration 13, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 31, Action Source: Exploitation\n", "Episode 25, Iteration 14, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 35, Action Source: Model Prediction\n", "Episode 25, Iteration 15, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 39, Action Source: Exploitation\n", "Episode 25, Iteration 16, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation 
Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 25, Iteration 17, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 25, Iteration 18, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 25, Iteration 19, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 54, Action Source: Exploitation\n", "Episode 25, Iteration 20, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 57, Action Source: Exploration\n", "Episode 25, Iteration 21, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 25, Iteration 22, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 25, Iteration 23, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 64, Action Source: Exploration\n", "Episode 25, Iteration 24, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 25, Iteration 25, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 68, Action Source: Exploration\n", "Episode 25, Iteration 26, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 25, Iteration 27, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 25, Iteration 28, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 25, Iteration 29, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 78, Action Source: Exploration\n", "Episode 25, Iteration 30, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 25, Iteration 31, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 25, Iteration 32, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 83, Action Source: Exploration\n", "Episode 25, Iteration 33, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , 
Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 25, Iteration 34, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 25, Iteration 35, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 25, Iteration 36, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 94, Action Source: Exploitation\n", "Episode 25, Iteration 37, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 25, Iteration 38, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 25, Iteration 39, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 25, Iteration 40, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 25, Iteration 41, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 25, Iteration 42, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 25, Iteration 43, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 25, Iteration 44, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 25, Iteration 45, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 25, Iteration 46, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 25, Iteration 47, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 25, Iteration 48, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 25, Iteration 49, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 25, Iteration 50, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 120, Action Source: Model 
Prediction\n", "Episode 25, Iteration 51, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 25, Iteration 52, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 25, Iteration 53, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 125, Action Source: Exploration\n", "Episode 25, Iteration 54, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 126, Action Source: Exploration\n", "Episode 25, Iteration 55, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 25, Iteration 56, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 25, Iteration 57, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 25, Iteration 58, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 25, Iteration 59, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 25, Iteration 60, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode: 25 Best Action: 0 Best evaluation action: 2\n", "Episode: 25 Score: 132 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0 Best Action Source: Model Prediction\n", "Episode 26, Iteration 1, State: (3.0, 15.0, 116.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 26, Iteration 2, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 26, Iteration 3, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 26, Iteration 4, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 26, Iteration 5, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 10, Action Source: Exploration\n", "Episode 26, Iteration 6, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 12, Action Source: Exploration\n", 
"Episode 26, Iteration 7, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 26, Iteration 8, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 16, Action Source: Exploration\n", "Episode 26, Iteration 9, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 26, Iteration 10, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 26, Iteration 11, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 22, Action Source: Exploration\n", "Episode 26, Iteration 12, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 26, Iteration 13, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 26, Iteration 14, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 28, Action Source: Exploration\n", "Episode 26, Iteration 15, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 26, Iteration 16, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 32, Action Source: Exploration\n", "Episode 26, Iteration 17, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 26, Iteration 18, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 26, Iteration 19, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 26, Iteration 20, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 26, Iteration 21, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 44, Action Source: Exploration\n", "Episode 26, Iteration 22, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 26, Iteration 23, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 26, Iteration 24, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 
2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 53, Action Source: Exploration\n", "Episode 26, Iteration 25, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 26, Iteration 26, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 26, Iteration 27, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 62, Action Source: Exploration\n", "Episode 26, Iteration 28, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 26, Iteration 29, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 26, Iteration 30, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 26, Iteration 31, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 75, Action Source: Exploration\n", "Episode 26, Iteration 32, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 26, Iteration 33, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 26, Iteration 34, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 87, Action Source: Exploitation\n", "Episode 26, Iteration 35, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 90, Action Source: Exploration\n", "Episode 26, Iteration 36, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 26, Iteration 37, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 26, Iteration 38, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 26, Iteration 39, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 26, Iteration 40, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 26, Iteration 41, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), 
Reward: 2, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 26, Iteration 42, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 26, Iteration 43, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 26, Iteration 44, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 111, Action Source: Exploration\n", "Episode 26, Iteration 45, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 26, Iteration 46, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 26, Iteration 47, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 26, Iteration 48, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 123, Action Source: Exploration\n", "Episode 26, Iteration 49, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 26, Iteration 50, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 26, Iteration 51, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 132, Action Source: Exploration\n", "Episode 26, Iteration 52, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 135, Action Source: Model Prediction\n", "Episode 26, Iteration 53, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 138, Action Source: Model Prediction\n", "Episode 26, Iteration 54, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 141, Action Source: Exploration\n", "Episode 26, Iteration 55, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 144, Action Source: Exploration\n", "Episode 26, Iteration 56, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 147, Action Source: Model Prediction\n", "Episode 26, Iteration 57, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 150, Action Source: Model Prediction\n", "Episode 26, Iteration 58, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 153, Action 
Source: Exploration\n", "Episode 26, Iteration 59, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 156, Action Source: Model Prediction\n", "Episode 26, Iteration 60, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 159, Action Source: Model Prediction\n", "Episode: 26 Best Action: 0 Best evaluation action: 0\n", "Episode: 26 Score: 159 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 27, Iteration 1, State: (4.0, 16.0, 115.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 27, Iteration 2, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 27, Iteration 3, State: (5.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 27, Iteration 4, State: (5.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 27, Iteration 5, State: (5.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 27, Iteration 6, State: (5.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 27, Iteration 7, State: (5.0, 17.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 27, Iteration 8, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 27, Iteration 9, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 27, Iteration 10, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 27, Iteration 11, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 27, Iteration 12, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 27, Iteration 13, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 39, Action Source: Exploration\n", "Episode 27, Iteration 14, State: (5.0, 17.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 42, Action Source: Exploration\n", "Episode 27, 
Iteration 15, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 27, Iteration 16, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 27, Iteration 17, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 51, Action Source: Exploration\n", "Episode 27, Iteration 18, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 53, Action Source: Exploration\n", "Episode 27, Iteration 19, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 27, Iteration 20, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 58, Action Source: Exploration\n", "Episode 27, Iteration 21, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 27, Iteration 22, State: (4.0, 17.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 27, Iteration 23, State: (5.0, 18.0, 130.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 135.0, 92.0), Reward: 2, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 27, Iteration 24, State: (5.0, 19.0, 135.0, 92.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 66, Action Source: Exploration\n", "Episode 27, Iteration 25, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 27, Iteration 26, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 27, Iteration 27, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 27, Iteration 28, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 70, Action Source: Model Prediction\n", "Episode 27, Iteration 29, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 71, Action Source: Exploration\n", "Episode 27, Iteration 30, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 27, Iteration 31, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 73, Action Source: Exploration\n", "Episode 27, Iteration 32, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation 
Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 27, Iteration 33, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 75, Action Source: Exploration\n", "Episode 27, Iteration 34, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 27, Iteration 35, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 27, Iteration 36, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 27, Iteration 37, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 27, Iteration 38, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 27, Iteration 39, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 27, Iteration 40, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 27, Iteration 41, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 27, Iteration 42, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 27, Iteration 43, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 27, Iteration 44, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 86, Action Source: Exploration\n", "Episode 27, Iteration 45, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 27, Iteration 46, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 88, Action Source: Exploration\n", "Episode 27, Iteration 47, State: (4.0, 19.0, 135.0, 92.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 27, Iteration 48, State: (5.0, 20.0, 140.0, 93.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 145.0, 94.0), Reward: 1, , Cumulative Score: 90, Action Source: Exploration\n", "Episode 27, Iteration 49, State: (5.0, 21.0, 145.0, 94.0), Agent Action: 0, Evaluation Action 0, Next State: (4.0, 20.0, 140.0, 93.0), Reward: 1, , 
Cumulative Score: 91, Action Source: Exploration\n", "Episode 27, Iteration 50, State: (4.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 27, Iteration 51, State: (4.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 27, Iteration 52, State: (4.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 27, Iteration 53, State: (4.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 27, Iteration 54, State: (4.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 27, Iteration 55, State: (4.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 27, Iteration 56, State: (4.0, 20.0, 140.0, 93.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 19.0, 135.0, 92.0), Reward: 2, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 27, Iteration 57, State: (3.0, 19.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 19.0, 135.0, 92.0), Reward: 2, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 27, Iteration 58, State: (3.0, 19.0, 135.0, 92.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 27, Iteration 59, State: (4.0, 20.0, 140.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 20.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 27, Iteration 60, State: (4.0, 20.0, 140.0, 93.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 145.0, 94.0), Reward: 1, , Cumulative Score: 104, Action Source: Exploration\n", "Episode: 27 Best Action: 1 Best evaluation action: 2\n", "Episode: 27 Score: 104 Best Reward: 3 Gsize State: 4.0 Bratio State: 1:16.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 89.0 Best Action Source: Exploration\n", "Episode 28, Iteration 1, State: (4.0, 15.0, 119.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 28, Iteration 2, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 28, Iteration 3, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 28, Iteration 4, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 28, Iteration 5, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 15, Action 
Source: Model Prediction\n", "Episode 28, Iteration 6, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 28, Iteration 7, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 28, Iteration 8, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 28, Iteration 9, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 28, Iteration 10, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 28, Iteration 11, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 28, Iteration 12, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 28, Iteration 13, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 28, Iteration 14, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 42, Action Source: Exploration\n", "Episode 28, Iteration 15, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 28, Iteration 16, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 28, Iteration 17, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 51, Action Source: Exploration\n", "Episode 28, Iteration 18, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 28, Iteration 19, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 28, Iteration 20, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 28, Iteration 21, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 28, Iteration 22, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 67, Action Source: Exploitation\n", "Episode 28, Iteration 23, State: (3.0, 
13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 28, Iteration 24, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 74, Action Source: Exploration\n", "Episode 28, Iteration 25, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 28, Iteration 26, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 80, Action Source: Exploration\n", "Episode 28, Iteration 27, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 28, Iteration 28, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 28, Iteration 29, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 28, Iteration 30, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 91, Action Source: Exploration\n", "Episode 28, Iteration 31, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 28, Iteration 32, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 96, Action Source: Exploration\n", "Episode 28, Iteration 33, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 28, Iteration 34, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 28, Iteration 35, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 28, Iteration 36, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 28, Iteration 37, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 28, Iteration 38, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 113, Action Source: Exploration\n", "Episode 28, Iteration 39, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 117, Action Source: Exploration\n", "Episode 28, Iteration 40, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 
13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 28, Iteration 41, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 28, Iteration 42, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 129, Action Source: Exploration\n", "Episode 28, Iteration 43, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 28, Iteration 44, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 28, Iteration 45, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 140, Action Source: Exploitation\n", "Episode 28, Iteration 46, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 144, Action Source: Model Prediction\n", "Episode 28, Iteration 47, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 148, Action Source: Exploitation\n", "Episode 28, Iteration 48, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 152, Action Source: Model Prediction\n", "Episode 28, Iteration 49, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 156, Action Source: Exploitation\n", "Episode 28, Iteration 50, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 160, Action Source: Exploitation\n", "Episode 28, Iteration 51, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 164, Action Source: Model Prediction\n", "Episode 28, Iteration 52, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 168, Action Source: Exploration\n", "Episode 28, Iteration 53, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 172, Action Source: Model Prediction\n", "Episode 28, Iteration 54, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 175, Action Source: Exploration\n", "Episode 28, Iteration 55, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 178, Action Source: Model Prediction\n", "Episode 28, Iteration 56, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 180, Action Source: Exploration\n", "Episode 28, Iteration 57, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 182, 
Action Source: Exploration\n", "Episode 28, Iteration 58, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 184, Action Source: Model Prediction\n", "Episode 28, Iteration 59, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 185, Action Source: Exploration\n", "Episode 28, Iteration 60, State: (4.0, 11.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 186, Action Source: Model Prediction\n", "Episode: 28 Best Action: 0 Best evaluation action: 0\n", "Episode: 28 Score: 186 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploitation\n", "Episode 29, Iteration 1, State: (4.0, 15.0, 117.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 29, Iteration 2, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 29, Iteration 3, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 29, Iteration 4, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 13, Action Source: Exploration\n", "Episode 29, Iteration 5, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 17, Action Source: Model Prediction\n", "Episode 29, Iteration 6, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 29, Iteration 7, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 29, Iteration 8, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 29, Iteration 9, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 29, Iteration 10, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 29, Iteration 11, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 29, Iteration 12, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 29, Iteration 13, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 42, Action Source: Model 
Prediction\n", "Episode 29, Iteration 14, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 29, Iteration 15, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 49, Action Source: Exploration\n", "Episode 29, Iteration 16, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 53, Action Source: Exploitation\n", "Episode 29, Iteration 17, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 57, Action Source: Exploitation\n", "Episode 29, Iteration 18, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 29, Iteration 19, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 29, Iteration 20, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 69, Action Source: Exploitation\n", "Episode 29, Iteration 21, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 29, Iteration 22, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 77, Action Source: Exploitation\n", "Episode 29, Iteration 23, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 80, Action Source: Exploitation\n", "Episode 29, Iteration 24, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 29, Iteration 25, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 29, Iteration 26, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 29, Iteration 27, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 29, Iteration 28, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 97, Action Source: Exploration\n", "Episode 29, Iteration 29, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 29, Iteration 30, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 29, Iteration 31, State: (3.0, 14.0, 120.0, 88.0), 
Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 29, Iteration 32, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 112, Action Source: Exploration\n", "Episode 29, Iteration 33, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 29, Iteration 34, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 29, Iteration 35, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 29, Iteration 36, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 29, Iteration 37, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 127, Action Source: Exploration\n", "Episode 29, Iteration 38, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 130, Action Source: Exploration\n", "Episode 29, Iteration 39, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 29, Iteration 40, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 136, Action Source: Model Prediction\n", "Episode 29, Iteration 41, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 139, Action Source: Model Prediction\n", "Episode 29, Iteration 42, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode 29, Iteration 43, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 145, Action Source: Model Prediction\n", "Episode 29, Iteration 44, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 147, Action Source: Exploration\n", "Episode 29, Iteration 45, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 150, Action Source: Exploration\n", "Episode 29, Iteration 46, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 153, Action Source: Model Prediction\n", "Episode 29, Iteration 47, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 156, Action Source: Model Prediction\n", "Episode 29, Iteration 48, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: 
(4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 159, Action Source: Model Prediction\n", "Episode 29, Iteration 49, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 162, Action Source: Exploration\n", "Episode 29, Iteration 50, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 165, Action Source: Model Prediction\n", "Episode 29, Iteration 51, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 168, Action Source: Exploration\n", "Episode 29, Iteration 52, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 171, Action Source: Exploration\n", "Episode 29, Iteration 53, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 174, Action Source: Model Prediction\n", "Episode 29, Iteration 54, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 177, Action Source: Exploration\n", "Episode 29, Iteration 55, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 180, Action Source: Model Prediction\n", "Episode 29, Iteration 56, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 183, Action Source: Model Prediction\n", "Episode 29, Iteration 57, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 186, Action Source: Model Prediction\n", "Episode 29, Iteration 58, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 189, Action Source: Exploration\n", "Episode 29, Iteration 59, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 192, Action Source: Model Prediction\n", "Episode 29, Iteration 60, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 195, Action Source: Model Prediction\n", "Episode: 29 Best Action: 0 Best evaluation action: 2\n", "Episode: 29 Score: 195 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 30, Iteration 1, State: (3.0, 15.0, 123.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 30, Iteration 2, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 30, Iteration 3, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 30, Iteration 4, State: (3.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 
87.0), Reward: 4, , Cumulative Score: 13, Action Source: Exploration\n", "Episode 30, Iteration 5, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 17, Action Source: Model Prediction\n", "Episode 30, Iteration 6, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 20, Action Source: Exploration\n", "Episode 30, Iteration 7, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 30, Iteration 8, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 30, Iteration 9, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 30, Iteration 10, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 33, Action Source: Exploration\n", "Episode 30, Iteration 11, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 36, Action Source: Exploration\n", "Episode 30, Iteration 12, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 30, Iteration 13, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 30, Iteration 14, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 30, Iteration 15, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 30, Iteration 16, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 51, Action Source: Exploration\n", "Episode 30, Iteration 17, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 55, Action Source: Exploitation\n", "Episode 30, Iteration 18, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 30, Iteration 19, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 62, Action Source: Exploration\n", "Episode 30, Iteration 20, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 66, Action Source: Exploration\n", "Episode 30, Iteration 21, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 70, Action Source: Model Prediction\n", 
"Episode 30, Iteration 22, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 30, Iteration 23, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 30, Iteration 24, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 30, Iteration 25, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 30, Iteration 26, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 90, Action Source: Exploration\n", "Episode 30, Iteration 27, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 94, Action Source: Exploitation\n", "Episode 30, Iteration 28, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 98, Action Source: Exploitation\n", "Episode 30, Iteration 29, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 101, Action Source: Exploitation\n", "Episode 30, Iteration 30, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 30, Iteration 31, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 30, Iteration 32, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 111, Action Source: Exploitation\n", "Episode 30, Iteration 33, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 115, Action Source: Exploitation\n", "Episode 30, Iteration 34, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 30, Iteration 35, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 123, Action Source: Exploration\n", "Episode 30, Iteration 36, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 30, Iteration 37, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 131, Action Source: Exploitation\n", "Episode 30, Iteration 38, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 134, Action Source: Exploitation\n", "Episode 30, Iteration 39, State: (4.0, 15.0, 125.0, 88.0), 
Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 30, Iteration 40, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 140, Action Source: Model Prediction\n", "Episode 30, Iteration 41, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 144, Action Source: Exploration\n", "Episode 30, Iteration 42, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 148, Action Source: Exploitation\n", "Episode 30, Iteration 43, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 152, Action Source: Exploitation\n", "Episode 30, Iteration 44, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 156, Action Source: Exploitation\n", "Episode 30, Iteration 45, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 160, Action Source: Exploitation\n", "Episode 30, Iteration 46, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 164, Action Source: Exploitation\n", "Episode 30, Iteration 47, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 168, Action Source: Exploitation\n", "Episode 30, Iteration 48, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 172, Action Source: Exploitation\n", "Episode 30, Iteration 49, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 176, Action Source: Model Prediction\n", "Episode 30, Iteration 50, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 180, Action Source: Model Prediction\n", "Episode 30, Iteration 51, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 184, Action Source: Model Prediction\n", "Episode 30, Iteration 52, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 188, Action Source: Exploration\n", "Episode 30, Iteration 53, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 190, Action Source: Exploration\n", "Episode 30, Iteration 54, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 191, Action Source: Exploration\n", "Episode 30, Iteration 55, State: (5.0, 15.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 192, Action Source: Model Prediction\n", "Episode 30, Iteration 56, State: (5.0, 15.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 
130.0, 88.0), Reward: 1, , Cumulative Score: 193, Action Source: Model Prediction\n", "Episode 30, Iteration 57, State: (5.0, 15.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 194, Action Source: Model Prediction\n", "Episode 30, Iteration 58, State: (5.0, 15.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 196, Action Source: Exploration\n", "Episode 30, Iteration 59, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 198, Action Source: Exploration\n", "Episode 30, Iteration 60, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 200, Action Source: Model Prediction\n", "Episode: 30 Best Action: 0 Best evaluation action: 0\n", "Episode: 30 Score: 200 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 31, Iteration 1, State: (4.0, 14.0, 115.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 31, Iteration 2, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 31, Iteration 3, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 31, Iteration 4, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 31, Iteration 5, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 10, Action Source: Exploration\n", "Episode 31, Iteration 6, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 31, Iteration 7, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 14, Action Source: Exploration\n", "Episode 31, Iteration 8, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 31, Iteration 9, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 31, Iteration 10, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 31, Iteration 11, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 91.0), Reward: 3, , Cumulative Score: 23, Action Source: Exploration\n", "Episode 31, Iteration 12, State: (5.0, 16.0, 125.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 26, 
Action Source: Exploration\n", "Episode 31, Iteration 13, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 31, Iteration 14, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 91.0), Reward: 3, , Cumulative Score: 32, Action Source: Exploration\n", "Episode 31, Iteration 15, State: (5.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 91.0), Reward: 3, , Cumulative Score: 35, Action Source: Model Prediction\n", "Episode 31, Iteration 16, State: (5.0, 16.0, 125.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 31, Iteration 17, State: (5.0, 17.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 41, Action Source: Model Prediction\n", "Episode 31, Iteration 18, State: (5.0, 17.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 31, Iteration 19, State: (5.0, 17.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 31, Iteration 20, State: (5.0, 17.0, 130.0, 92.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 49, Action Source: Exploration\n", "Episode 31, Iteration 21, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 31, Iteration 22, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 53, Action Source: Exploration\n", "Episode 31, Iteration 23, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 31, Iteration 24, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 31, Iteration 25, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 31, Iteration 26, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 61, Action Source: Model Prediction\n", "Episode 31, Iteration 27, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 31, Iteration 28, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 130.0, 92.0), Reward: 1, , Cumulative Score: 64, Action Source: Exploration\n", "Episode 31, Iteration 29, State: (4.0, 17.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 130.0, 92.0), Reward: 1, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 31, Iteration 30, State: 
(4.0, 17.0, 130.0, 92.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 67, Action Source: Exploration\n", "Episode 31, Iteration 31, State: (3.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 31, Iteration 32, State: (3.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 31, Iteration 33, State: (3.0, 16.0, 125.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 73, Action Source: Exploration\n", "Episode 31, Iteration 34, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 31, Iteration 35, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 31, Iteration 36, State: (3.0, 15.0, 120.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 31, Iteration 37, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 31, Iteration 38, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 31, Iteration 39, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 31, Iteration 40, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 31, Iteration 41, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 31, Iteration 42, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 31, Iteration 43, State: (4.0, 16.0, 125.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 31, Iteration 44, State: (5.0, 17.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 31, Iteration 45, State: (5.0, 17.0, 130.0, 92.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 31, Iteration 46, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 31, Iteration 47, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next 
State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 31, Iteration 48, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 31, Iteration 49, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 130.0, 92.0), Reward: 1, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 31, Iteration 50, State: (4.0, 17.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 130.0, 92.0), Reward: 1, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 31, Iteration 51, State: (4.0, 17.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 130.0, 92.0), Reward: 1, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 31, Iteration 52, State: (4.0, 17.0, 130.0, 92.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 31, Iteration 53, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 31, Iteration 54, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 31, Iteration 55, State: (5.0, 18.0, 135.0, 93.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 140.0, 94.0), Reward: 1, , Cumulative Score: 115, Action Source: Exploration\n", "Episode 31, Iteration 56, State: (5.0, 19.0, 140.0, 94.0), Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 31, Iteration 57, State: (4.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 18.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 117, Action Source: Exploration\n", "Episode 31, Iteration 58, State: (4.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 18.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 31, Iteration 59, State: (4.0, 18.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 18.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 31, Iteration 60, State: (4.0, 18.0, 135.0, 93.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 140.0, 94.0), Reward: 1, , Cumulative Score: 120, Action Source: Exploration\n", "Episode: 31 Best Action: 2 Best evaluation action: 2\n", "Episode: 31 Score: 120 Best Reward: 3 Gsize State: 5.0 Bratio State: 1:16.0 Btime State: 125.0 convert: 2 minutes 5 seconds Temperature State: 91.0 Best Action Source: Exploration\n", "Episode 32, Iteration 1, State: (5.0, 15.0, 122.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 32, Iteration 2, State: (5.0, 15.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 5, Action Source: Model Prediction\n", "Episode 32, Iteration 3, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: 
(4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 32, Iteration 4, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 10, Action Source: Exploration\n", "Episode 32, Iteration 5, State: (5.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 32, Iteration 6, State: (5.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 32, Iteration 7, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 17, Action Source: Exploration\n", "Episode 32, Iteration 8, State: (5.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 19, Action Source: Model Prediction\n", "Episode 32, Iteration 9, State: (5.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 32, Iteration 10, State: (5.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 32, Iteration 11, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 32, Iteration 12, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 32, Iteration 13, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 32, Iteration 14, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 32, Iteration 15, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 32, Iteration 16, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 32, Iteration 17, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 32, Iteration 18, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 32, Iteration 19, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 50, Action Source: Exploration\n", "Episode 32, Iteration 20, State: (5.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 52, Action Source: 
Model Prediction\n", "Episode 32, Iteration 21, State: (5.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 32, Iteration 22, State: (5.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 57, Action Source: Exploration\n", "Episode 32, Iteration 23, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 32, Iteration 24, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 32, Iteration 25, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 32, Iteration 26, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 32, Iteration 27, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 32, Iteration 28, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 75, Action Source: Exploration\n", "Episode 32, Iteration 29, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 32, Iteration 30, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 82, Action Source: Exploration\n", "Episode 32, Iteration 31, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 32, Iteration 32, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 89, Action Source: Exploitation\n", "Episode 32, Iteration 33, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 32, Iteration 34, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 32, Iteration 35, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 32, Iteration 36, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 100, Action Source: Exploration\n", "Episode 32, Iteration 37, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 32, Iteration 38, State: (4.0, 14.0, 
125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 32, Iteration 39, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 32, Iteration 40, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 111, Action Source: Exploration\n", "Episode 32, Iteration 41, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 114, Action Source: Exploration\n", "Episode 32, Iteration 42, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 32, Iteration 43, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 32, Iteration 44, State: (5.0, 16.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 123, Action Source: Exploration\n", "Episode 32, Iteration 45, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 126, Action Source: Exploration\n", "Episode 32, Iteration 46, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 32, Iteration 47, State: (4.0, 15.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 132, Action Source: Exploration\n", "Episode 32, Iteration 48, State: (3.0, 14.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 136, Action Source: Exploration\n", "Episode 32, Iteration 49, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 139, Action Source: Exploration\n", "Episode 32, Iteration 50, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode 32, Iteration 51, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 145, Action Source: Model Prediction\n", "Episode 32, Iteration 52, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 148, Action Source: Exploration\n", "Episode 32, Iteration 53, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 152, Action Source: Model Prediction\n", "Episode 32, Iteration 54, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 156, Action Source: Model Prediction\n", "Episode 32, Iteration 55, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next 
State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 160, Action Source: Model Prediction\n", "Episode 32, Iteration 56, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 164, Action Source: Model Prediction\n", "Episode 32, Iteration 57, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 168, Action Source: Exploitation\n", "Episode 32, Iteration 58, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 172, Action Source: Exploration\n", "Episode 32, Iteration 59, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 176, Action Source: Model Prediction\n", "Episode 32, Iteration 60, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 180, Action Source: Model Prediction\n", "Episode: 32 Best Action: 0 Best evaluation action: 0\n", "Episode: 32 Score: 180 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 33, Iteration 1, State: (4.0, 15.0, 115.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 91.0), Reward: 3, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 33, Iteration 2, State: (5.0, 16.0, 120.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 91.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 33, Iteration 3, State: (5.0, 16.0, 120.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 91.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 33, Iteration 4, State: (5.0, 16.0, 120.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 33, Iteration 5, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 33, Iteration 6, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 33, Iteration 7, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 33, Iteration 8, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 33, Iteration 9, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 33, Iteration 10, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 33, Iteration 11, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, 
, Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 33, Iteration 12, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 33, Iteration 13, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 42, Action Source: Exploration\n", "Episode 33, Iteration 14, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 46, Action Source: Model Prediction\n", "Episode 33, Iteration 15, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 33, Iteration 16, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 53, Action Source: Exploitation\n", "Episode 33, Iteration 17, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 33, Iteration 18, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 33, Iteration 19, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 62, Action Source: Model Prediction\n", "Episode 33, Iteration 20, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 33, Iteration 21, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 33, Iteration 22, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 33, Iteration 23, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 74, Action Source: Exploration\n", "Episode 33, Iteration 24, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 77, Action Source: Exploration\n", "Episode 33, Iteration 25, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 33, Iteration 26, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 33, Iteration 27, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 33, Iteration 28, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 87, Action Source: Model Prediction\n", 
"Episode 33, Iteration 29, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 33, Iteration 30, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 33, Iteration 31, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 33, Iteration 32, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 33, Iteration 33, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 33, Iteration 34, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 33, Iteration 35, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 33, Iteration 36, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 33, Iteration 37, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 33, Iteration 38, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 33, Iteration 39, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 33, Iteration 40, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 33, Iteration 41, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 33, Iteration 42, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 33, Iteration 43, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 33, Iteration 44, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 33, Iteration 45, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 33, Iteration 46, State: (3.0, 6.0, 120.0, 81.0), Agent 
Action: 2, Evaluation Action 2, Next State: (4.0, 7.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 122, Action Source: Exploration\n", "Episode 33, Iteration 47, State: (4.0, 7.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 7.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 33, Iteration 48, State: (4.0, 7.0, 125.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 125, Action Source: Exploration\n", "Episode 33, Iteration 49, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 127, Action Source: Exploration\n", "Episode 33, Iteration 50, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 33, Iteration 51, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 33, Iteration 52, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 33, Iteration 53, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 135, Action Source: Model Prediction\n", "Episode 33, Iteration 54, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 137, Action Source: Exploration\n", "Episode 33, Iteration 55, State: (3.0, 4.0, 120.0, 79.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 139, Action Source: Model Prediction\n", "Episode 33, Iteration 56, State: (3.0, 4.0, 120.0, 79.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 141, Action Source: Model Prediction\n", "Episode 33, Iteration 57, State: (3.0, 4.0, 120.0, 79.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 143, Action Source: Model Prediction\n", "Episode 33, Iteration 58, State: (3.0, 4.0, 120.0, 79.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 145, Action Source: Model Prediction\n", "Episode 33, Iteration 59, State: (3.0, 4.0, 120.0, 79.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 147, Action Source: Model Prediction\n", "Episode 33, Iteration 60, State: (3.0, 4.0, 120.0, 79.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 4.0, 120.0, 79.0), Reward: 2, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode: 33 Best Action: 0 Best evaluation action: 0\n", "Episode: 33 Score: 149 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Model Prediction\n", "Episode 34, Iteration 1, State: (4.0, 15.0, 118.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 34, Iteration 2, State: (5.0, 16.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next 
State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 34, Iteration 3, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 34, Iteration 4, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 34, Iteration 5, State: (5.0, 16.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 34, Iteration 6, State: (4.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 19, Action Source: Exploration\n", "Episode 34, Iteration 7, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 34, Iteration 8, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 34, Iteration 9, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 31, Action Source: Model Prediction\n", "Episode 34, Iteration 10, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 34, Action Source: Exploitation\n", "Episode 34, Iteration 11, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 37, Action Source: Exploration\n", "Episode 34, Iteration 12, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 40, Action Source: Exploration\n", "Episode 34, Iteration 13, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 43, Action Source: Exploration\n", "Episode 34, Iteration 14, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 46, Action Source: Exploration\n", "Episode 34, Iteration 15, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 49, Action Source: Model Prediction\n", "Episode 34, Iteration 16, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 52, Action Source: Exploration\n", "Episode 34, Iteration 17, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 55, Action Source: Exploration\n", "Episode 34, Iteration 18, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 34, Iteration 19, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 61, Action Source: Model 
Prediction\n", "Episode 34, Iteration 20, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 34, Iteration 21, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 34, Iteration 22, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 70, Action Source: Model Prediction\n", "Episode 34, Iteration 23, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 73, Action Source: Exploration\n", "Episode 34, Iteration 24, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 34, Iteration 25, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 34, Iteration 26, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 34, Iteration 27, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 34, Iteration 28, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 34, Iteration 29, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 34, Iteration 30, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 34, Iteration 31, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 97, Action Source: Exploration\n", "Episode 34, Iteration 32, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 34, Iteration 33, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 101, Action Source: Exploration\n", "Episode 34, Iteration 34, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 34, Iteration 35, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 34, Iteration 36, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 34, Iteration 37, State: (3.0, 16.0, 
130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 34, Iteration 38, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 34, Iteration 39, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 34, Iteration 40, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 34, Iteration 41, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 34, Iteration 42, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 34, Iteration 43, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 121, Action Source: Exploration\n", "Episode 34, Iteration 44, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 34, Iteration 45, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 129, Action Source: Exploitation\n", "Episode 34, Iteration 46, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 34, Iteration 47, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 34, Iteration 48, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 141, Action Source: Model Prediction\n", "Episode 34, Iteration 49, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 145, Action Source: Model Prediction\n", "Episode 34, Iteration 50, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 149, Action Source: Exploitation\n", "Episode 34, Iteration 51, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 153, Action Source: Exploitation\n", "Episode 34, Iteration 52, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 157, Action Source: Model Prediction\n", "Episode 34, Iteration 53, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 161, Action Source: Exploration\n", "Episode 34, Iteration 54, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, 
Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 165, Action Source: Exploitation\n", "Episode 34, Iteration 55, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 169, Action Source: Exploitation\n", "Episode 34, Iteration 56, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 173, Action Source: Exploration\n", "Episode 34, Iteration 57, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 177, Action Source: Model Prediction\n", "Episode 34, Iteration 58, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 181, Action Source: Model Prediction\n", "Episode 34, Iteration 59, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 184, Action Source: Exploitation\n", "Episode 34, Iteration 60, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 185, Action Source: Exploration\n", "Episode: 34 Best Action: 0 Best evaluation action: 2\n", "Episode: 34 Score: 185 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 35, Iteration 1, State: (3.0, 16.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 35, Iteration 2, State: (3.0, 16.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 35, Iteration 3, State: (3.0, 16.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 35, Iteration 4, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 35, Iteration 5, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 35, Iteration 6, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 35, Iteration 7, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 22, Action Source: Exploration\n", "Episode 35, Iteration 8, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 35, Iteration 9, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 35, Iteration 10, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 
13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 35, Iteration 11, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 35, Action Source: Exploration\n", "Episode 35, Iteration 12, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 35, Iteration 13, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 41, Action Source: Model Prediction\n", "Episode 35, Iteration 14, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 44, Action Source: Exploration\n", "Episode 35, Iteration 15, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 35, Iteration 16, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 35, Iteration 17, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 53, Action Source: Model Prediction\n", "Episode 35, Iteration 18, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 35, Iteration 19, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 35, Iteration 20, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 35, Iteration 21, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 35, Iteration 22, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 35, Iteration 23, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 67, Action Source: Exploration\n", "Episode 35, Iteration 24, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 35, Iteration 25, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 35, Iteration 26, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 74, Action Source: Exploration\n", "Episode 35, Iteration 27, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 76, Action Source: 
Model Prediction\n", "Episode 35, Iteration 28, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 35, Iteration 29, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 35, Iteration 30, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 35, Iteration 31, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 35, Iteration 32, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 35, Iteration 33, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 35, Iteration 34, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 35, Iteration 35, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 35, Iteration 36, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 35, Iteration 37, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 35, Iteration 38, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 35, Iteration 39, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 35, Iteration 40, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 35, Iteration 41, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 35, Iteration 42, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 35, Iteration 43, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 115, Action Source: Exploration\n", "Episode 35, Iteration 44, State: (4.0, 11.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 35, Iteration 
45, State: (4.0, 11.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 117, Action Source: Exploration\n", "Episode 35, Iteration 46, State: (4.0, 11.0, 125.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 84.0), Reward: 1, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 35, Iteration 47, State: (5.0, 12.0, 130.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 119, Action Source: Exploration\n", "Episode 35, Iteration 48, State: (4.0, 11.0, 125.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 84.0), Reward: 1, , Cumulative Score: 120, Action Source: Exploration\n", "Episode 35, Iteration 49, State: (5.0, 12.0, 130.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 84.0), Reward: 1, , Cumulative Score: 121, Action Source: Exploration\n", "Episode 35, Iteration 50, State: (5.0, 12.0, 130.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 12.0, 130.0, 84.0), Reward: 1, , Cumulative Score: 122, Action Source: Exploration\n", "Episode 35, Iteration 51, State: (5.0, 12.0, 130.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 35, Iteration 52, State: (4.0, 11.0, 125.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 35, Iteration 53, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 35, Iteration 54, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 35, Iteration 55, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 35, Iteration 56, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 35, Iteration 57, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 135, Action Source: Model Prediction\n", "Episode 35, Iteration 58, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 136, Action Source: Exploration\n", "Episode 35, Iteration 59, State: (4.0, 11.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 35, Iteration 60, State: (4.0, 11.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 138, Action Source: Model Prediction\n", "Episode: 35 Best Action: 0 Best evaluation action: 0\n", "Episode: 35 Score: 138 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0 Best Action Source: Exploration\n", "Episode 36, Iteration 1, State: (4.0, 
14.0, 124.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 3, Action Source: Exploration\n", "Episode 36, Iteration 2, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 36, Iteration 3, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 10, Action Source: Exploration\n", "Episode 36, Iteration 4, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 36, Iteration 5, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 36, Iteration 6, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 22, Action Source: Exploitation\n", "Episode 36, Iteration 7, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 36, Iteration 8, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 36, Iteration 9, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 36, Iteration 10, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 38, Action Source: Exploitation\n", "Episode 36, Iteration 11, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 36, Iteration 12, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 45, Action Source: Exploration\n", "Episode 36, Iteration 13, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 36, Iteration 14, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 36, Iteration 15, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 36, Iteration 16, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 36, Iteration 17, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 36, Iteration 18, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 2, Next 
State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 62, Action Source: Exploration\n", "Episode 36, Iteration 19, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 36, Iteration 20, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 66, Action Source: Exploration\n", "Episode 36, Iteration 21, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 68, Action Source: Exploration\n", "Episode 36, Iteration 22, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 70, Action Source: Model Prediction\n", "Episode 36, Iteration 23, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 72, Action Source: Exploration\n", "Episode 36, Iteration 24, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 73, Action Source: Exploration\n", "Episode 36, Iteration 25, State: (4.0, 10.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 36, Iteration 26, State: (4.0, 10.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 36, Iteration 27, State: (4.0, 10.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 36, Iteration 28, State: (4.0, 10.0, 125.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 36, Iteration 29, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 36, Iteration 30, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 36, Iteration 31, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 36, Iteration 32, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 86, Action Source: Exploration\n", "Episode 36, Iteration 33, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 88, Action Source: Exploration\n", "Episode 36, Iteration 34, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 36, Iteration 35, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 92, Action Source: Model 
Prediction\n", "Episode 36, Iteration 36, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 36, Iteration 37, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 36, Iteration 38, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 36, Iteration 39, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 100, Action Source: Exploration\n", "Episode 36, Iteration 40, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 36, Iteration 41, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 104, Action Source: Exploration\n", "Episode 36, Iteration 42, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 36, Iteration 43, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 7.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 107, Action Source: Exploration\n", "Episode 36, Iteration 44, State: (4.0, 7.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 7.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 36, Iteration 45, State: (4.0, 7.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 7.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 36, Iteration 46, State: (4.0, 7.0, 125.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 36, Iteration 47, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 36, Iteration 48, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 36, Iteration 49, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 6.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 36, Iteration 50, State: (4.0, 6.0, 125.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 6.0, 125.0, 81.0), Reward: 1, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 36, Iteration 51, State: (4.0, 6.0, 125.0, 81.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 7.0, 130.0, 82.0), Reward: 1, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 36, Iteration 52, State: (5.0, 7.0, 130.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 7.0, 130.0, 82.0), Reward: 1, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 36, Iteration 53, State: (5.0, 7.0, 130.0, 82.0), Agent Action: 1, 
Evaluation Action 2, Next State: (5.0, 7.0, 130.0, 82.0), Reward: 1, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 36, Iteration 54, State: (5.0, 7.0, 130.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 7.0, 130.0, 82.0), Reward: 1, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 36, Iteration 55, State: (5.0, 7.0, 130.0, 82.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 8.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 122, Action Source: Exploration\n", "Episode 36, Iteration 56, State: (5.0, 8.0, 135.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 8.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 36, Iteration 57, State: (5.0, 8.0, 135.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 8.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 36, Iteration 58, State: (5.0, 8.0, 135.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 8.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 36, Iteration 59, State: (5.0, 8.0, 135.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 8.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 36, Iteration 60, State: (5.0, 8.0, 135.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 8.0, 135.0, 83.0), Reward: 1, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode: 36 Best Action: 0 Best evaluation action: 2\n", "Episode: 36 Score: 127 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 37, Iteration 1, State: (3.0, 15.0, 116.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 37, Iteration 2, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 37, Iteration 3, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 37, Iteration 4, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 37, Iteration 5, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 20, Action Source: Exploitation\n", "Episode 37, Iteration 6, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 37, Iteration 7, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 28, Action Source: Exploitation\n", "Episode 37, Iteration 8, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 32, Action Source: Exploitation\n", "Episode 37, Iteration 9, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 
14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 37, Iteration 10, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 37, Iteration 11, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 37, Iteration 12, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 47, Action Source: Exploitation\n", "Episode 37, Iteration 13, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 37, Iteration 14, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 53, Action Source: Model Prediction\n", "Episode 37, Iteration 15, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 37, Iteration 16, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 37, Iteration 17, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 62, Action Source: Exploration\n", "Episode 37, Iteration 18, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 37, Iteration 19, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 68, Action Source: Exploration\n", "Episode 37, Iteration 20, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 37, Iteration 21, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 37, Iteration 22, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 77, Action Source: Exploration\n", "Episode 37, Iteration 23, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 37, Iteration 24, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 37, Iteration 25, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 37, Iteration 26, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 89, 
Action Source: Model Prediction\n", "Episode 37, Iteration 27, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 37, Iteration 28, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 37, Iteration 29, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 37, Iteration 30, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 37, Iteration 31, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 37, Iteration 32, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 107, Action Source: Exploration\n", "Episode 37, Iteration 33, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 37, Iteration 34, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 37, Iteration 35, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 37, Iteration 36, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 37, Iteration 37, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 37, Iteration 38, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 125, Action Source: Exploration\n", "Episode 37, Iteration 39, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 37, Iteration 40, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 131, Action Source: Exploration\n", "Episode 37, Iteration 41, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 134, Action Source: Exploration\n", "Episode 37, Iteration 42, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 37, Iteration 43, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 140, Action Source: Model Prediction\n", "Episode 37, Iteration 
44, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 141, Action Source: Model Prediction\n", "Episode 37, Iteration 45, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode 37, Iteration 46, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 145, Action Source: Exploration\n", "Episode 37, Iteration 47, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 148, Action Source: Model Prediction\n", "Episode 37, Iteration 48, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 151, Action Source: Model Prediction\n", "Episode 37, Iteration 49, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 154, Action Source: Model Prediction\n", "Episode 37, Iteration 50, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 157, Action Source: Model Prediction\n", "Episode 37, Iteration 51, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 158, Action Source: Exploration\n", "Episode 37, Iteration 52, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 159, Action Source: Model Prediction\n", "Episode 37, Iteration 53, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 160, Action Source: Model Prediction\n", "Episode 37, Iteration 54, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 161, Action Source: Model Prediction\n", "Episode 37, Iteration 55, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 163, Action Source: Exploration\n", "Episode 37, Iteration 56, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 165, Action Source: Model Prediction\n", "Episode 37, Iteration 57, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 167, Action Source: Model Prediction\n", "Episode 37, Iteration 58, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 169, Action Source: Exploration\n", "Episode 37, Iteration 59, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 171, Action Source: Model Prediction\n", "Episode 37, Iteration 60, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 173, Action Source: Model Prediction\n", "Episode: 37 Best Action: 0 Best evaluation action: 0\n", 
"Episode: 37 Score: 173 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 38, Iteration 1, State: (4.0, 15.0, 123.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 38, Iteration 2, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 38, Iteration 3, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 10, Action Source: Exploration\n", "Episode 38, Iteration 4, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 38, Iteration 5, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 38, Iteration 6, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 22, Action Source: Model Prediction\n", "Episode 38, Iteration 7, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 26, Action Source: Exploration\n", "Episode 38, Iteration 8, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 38, Iteration 9, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 38, Iteration 10, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 38, Iteration 11, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 38, Iteration 12, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 46, Action Source: Exploitation\n", "Episode 38, Iteration 13, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 38, Iteration 14, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 54, Action Source: Exploration\n", "Episode 38, Iteration 15, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 38, Iteration 16, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 62, Action Source: Model Prediction\n", "Episode 38, Iteration 17, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 2, Evaluation 
Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 65, Action Source: Exploitation\n", "Episode 38, Iteration 18, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 38, Iteration 19, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 38, Iteration 20, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 38, Iteration 21, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 38, Iteration 22, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 38, Iteration 23, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 85, Action Source: Exploitation\n", "Episode 38, Iteration 24, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 89, Action Source: Exploitation\n", "Episode 38, Iteration 25, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 91, Action Source: Exploration\n", "Episode 38, Iteration 26, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 38, Iteration 27, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 38, Iteration 28, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 38, Iteration 29, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 96, Action Source: Exploration\n", "Episode 38, Iteration 30, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 38, Iteration 31, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 38, Iteration 32, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 38, Iteration 33, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 38, Iteration 34, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , 
Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 38, Iteration 35, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 38, Iteration 36, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 38, Iteration 37, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 38, Iteration 38, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 38, Iteration 39, State: (4.0, 14.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 117, Action Source: Exploration\n", "Episode 38, Iteration 40, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 120, Action Source: Exploration\n", "Episode 38, Iteration 41, State: (4.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 38, Iteration 42, State: (4.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 38, Iteration 43, State: (4.0, 14.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 128, Action Source: Exploration\n", "Episode 38, Iteration 44, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 38, Iteration 45, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 132, Action Source: Exploration\n", "Episode 38, Iteration 46, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 38, Iteration 47, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 136, Action Source: Model Prediction\n", "Episode 38, Iteration 48, State: (5.0, 15.0, 135.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 139, Action Source: Exploration\n", "Episode 38, Iteration 49, State: (5.0, 16.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode 38, Iteration 50, State: (5.0, 16.0, 140.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 145.0, 92.0), Reward: 3, , Cumulative Score: 145, Action Source: Exploration\n", "Episode 38, Iteration 51, State: (5.0, 17.0, 145.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 147, Action Source: 
Exploration\n", "Episode 38, Iteration 52, State: (4.0, 16.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode 38, Iteration 53, State: (4.0, 16.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 151, Action Source: Model Prediction\n", "Episode 38, Iteration 54, State: (4.0, 16.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 153, Action Source: Model Prediction\n", "Episode 38, Iteration 55, State: (4.0, 16.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 155, Action Source: Model Prediction\n", "Episode 38, Iteration 56, State: (4.0, 16.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 157, Action Source: Model Prediction\n", "Episode 38, Iteration 57, State: (4.0, 16.0, 140.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 159, Action Source: Model Prediction\n", "Episode 38, Iteration 58, State: (4.0, 16.0, 140.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 145.0, 92.0), Reward: 3, , Cumulative Score: 162, Action Source: Exploration\n", "Episode 38, Iteration 59, State: (5.0, 17.0, 145.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 145.0, 92.0), Reward: 3, , Cumulative Score: 165, Action Source: Exploration\n", "Episode 38, Iteration 60, State: (5.0, 17.0, 145.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 145.0, 92.0), Reward: 3, , Cumulative Score: 168, Action Source: Model Prediction\n", "Episode: 38 Best Action: 0 Best evaluation action: 0\n", "Episode: 38 Score: 168 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 39, Iteration 1, State: (3.0, 16.0, 115.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 39, Iteration 2, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 39, Iteration 3, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 39, Iteration 4, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 39, Iteration 5, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 39, Iteration 6, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 39, Iteration 7, State: (3.0, 16.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 14, Action Source: Exploration\n", 
"Episode 39, Iteration 8, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 39, Iteration 9, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 39, Iteration 10, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 39, Iteration 11, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 22, Action Source: Model Prediction\n", "Episode 39, Iteration 12, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 39, Iteration 13, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 39, Iteration 14, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 28, Action Source: Model Prediction\n", "Episode 39, Iteration 15, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 39, Iteration 16, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 39, Iteration 17, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 39, Iteration 18, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 39, Iteration 19, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 39, Iteration 20, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 40, Action Source: Exploration\n", "Episode 39, Iteration 21, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 39, Iteration 22, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 45, Action Source: Exploration\n", "Episode 39, Iteration 23, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 39, Iteration 24, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 51, Action Source: Exploration\n", "Episode 39, Iteration 25, State: (5.0, 17.0, 130.0, 
91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 54, Action Source: Exploration\n", "Episode 39, Iteration 26, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 39, Iteration 27, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 39, Iteration 28, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 39, Iteration 29, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 39, Iteration 30, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 39, Iteration 31, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 39, Iteration 32, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 75, Action Source: Exploration\n", "Episode 39, Iteration 33, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 78, Action Source: Exploration\n", "Episode 39, Iteration 34, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 39, Iteration 35, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 84, Action Source: Exploration\n", "Episode 39, Iteration 36, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 39, Iteration 37, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 39, Iteration 38, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 39, Iteration 39, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 95, Action Source: Exploration\n", "Episode 39, Iteration 40, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 39, Iteration 41, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 39, Iteration 42, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 
130.0, 91.0), Reward: 3, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 39, Iteration 43, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 39, Iteration 44, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 39, Iteration 45, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 39, Iteration 46, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 39, Iteration 47, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 119, Action Source: Exploration\n", "Episode 39, Iteration 48, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 39, Iteration 49, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 39, Iteration 50, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 39, Iteration 51, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 39, Iteration 52, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 39, Iteration 53, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 39, Iteration 54, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 140, Action Source: Model Prediction\n", "Episode 39, Iteration 55, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 143, Action Source: Model Prediction\n", "Episode 39, Iteration 56, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 146, Action Source: Model Prediction\n", "Episode 39, Iteration 57, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 149, Action Source: Exploration\n", "Episode 39, Iteration 58, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 152, Action Source: Model Prediction\n", "Episode 39, Iteration 59, State: (5.0, 17.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , 
Cumulative Score: 155, Action Source: Exploration\n", "Episode 39, Iteration 60, State: (4.0, 16.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 158, Action Source: Exploration\n", "Episode: 39 Best Action: 2 Best evaluation action: 0\n", "Episode: 39 Score: 158 Best Reward: 3 Gsize State: 4.0 Bratio State: 1:16.0 Btime State: 125.0 convert: 2 minutes 5 seconds Temperature State: 90.0 Best Action Source: Exploration\n", "Episode 40, Iteration 1, State: (5.0, 15.0, 115.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 89.0), Reward: 1, , Cumulative Score: 1, Action Source: Exploration\n", "Episode 40, Iteration 2, State: (5.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 40, Iteration 3, State: (4.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 5, Action Source: Model Prediction\n", "Episode 40, Iteration 4, State: (5.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 40, Iteration 5, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 11, Action Source: Model Prediction\n", "Episode 40, Iteration 6, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 40, Iteration 7, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 17, Action Source: Exploration\n", "Episode 40, Iteration 8, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 40, Iteration 9, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 25, Action Source: Model Prediction\n", "Episode 40, Iteration 10, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 40, Iteration 11, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 40, Iteration 12, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 37, Action Source: Exploration\n", "Episode 40, Iteration 13, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 41, Action Source: Model Prediction\n", "Episode 40, Iteration 14, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 40, Iteration 15, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 49, Action Source: 
Exploitation\n", "Episode 40, Iteration 16, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 53, Action Source: Exploration\n", "Episode 40, Iteration 17, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 54, Action Source: Exploration\n", "Episode 40, Iteration 18, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 40, Iteration 19, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 40, Iteration 20, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 57, Action Source: Exploration\n", "Episode 40, Iteration 21, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 58, Action Source: Exploration\n", "Episode 40, Iteration 22, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 59, Action Source: Exploration\n", "Episode 40, Iteration 23, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 40, Iteration 24, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 40, Iteration 25, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 62, Action Source: Model Prediction\n", "Episode 40, Iteration 26, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 40, Iteration 27, State: (5.0, 14.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 64, Action Source: Exploration\n", "Episode 40, Iteration 28, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 40, Iteration 29, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 40, Iteration 30, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 70, Action Source: Exploration\n", "Episode 40, Iteration 31, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 40, Iteration 32, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 40, Iteration 33, State: (3.0, 11.0, 120.0, 85.0), 
Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 75, Action Source: Exploration\n", "Episode 40, Iteration 34, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 40, Iteration 35, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 78, Action Source: Exploration\n", "Episode 40, Iteration 36, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 40, Iteration 37, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 40, Iteration 38, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 40, Iteration 39, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 40, Iteration 40, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 40, Iteration 41, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 40, Iteration 42, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 40, Iteration 43, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 40, Iteration 44, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 40, Iteration 45, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 40, Iteration 46, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 40, Iteration 47, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 40, Iteration 48, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 40, Iteration 49, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 40, Iteration 50, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: 
(3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 40, Iteration 51, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 40, Iteration 52, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 40, Iteration 53, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 40, Iteration 54, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 40, Iteration 55, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 40, Iteration 56, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 40, Iteration 57, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 40, Iteration 58, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 40, Iteration 59, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 40, Iteration 60, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode: 40 Best Action: 0 Best evaluation action: 2\n", "Episode: 40 Score: 126 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Model Prediction\n", "Episode 41, Iteration 1, State: (5.0, 15.0, 122.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 41, Iteration 2, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 41, Iteration 3, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 41, Iteration 4, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 41, Iteration 5, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 41, Iteration 6, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 
125.0, 89.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 41, Iteration 7, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 41, Iteration 8, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 41, Iteration 9, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 41, Iteration 10, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 20, Action Source: Exploration\n", "Episode 41, Iteration 11, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 22, Action Source: Model Prediction\n", "Episode 41, Iteration 12, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 41, Iteration 13, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 41, Iteration 14, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 41, Iteration 15, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 41, Iteration 16, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 36, Action Source: Exploration\n", "Episode 41, Iteration 17, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 41, Iteration 18, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 41, Iteration 19, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 48, Action Source: Exploration\n", "Episode 41, Iteration 20, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 51, Action Source: Exploration\n", "Episode 41, Iteration 21, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 41, Iteration 22, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 41, Iteration 23, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 60, Action Source: 
Model Prediction\n", "Episode 41, Iteration 24, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 41, Iteration 25, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 41, Iteration 26, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 41, Iteration 27, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 41, Iteration 28, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 41, Iteration 29, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 41, Iteration 30, State: (5.0, 16.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 76, Action Source: Exploration\n", "Episode 41, Iteration 31, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 41, Iteration 32, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 41, Iteration 33, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 41, Iteration 34, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 41, Iteration 35, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 41, Iteration 36, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 41, Iteration 37, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 41, Iteration 38, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 41, Iteration 39, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 41, Iteration 40, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 41, Iteration 41, State: (3.0, 
12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 41, Iteration 42, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 41, Iteration 43, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 41, Iteration 44, State: (4.0, 13.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 117, Action Source: Exploration\n", "Episode 41, Iteration 45, State: (5.0, 14.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 41, Iteration 46, State: (5.0, 14.0, 130.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 88.0), Reward: 1, , Cumulative Score: 119, Action Source: Exploration\n", "Episode 41, Iteration 47, State: (5.0, 15.0, 135.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 121, Action Source: Exploration\n", "Episode 41, Iteration 48, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 41, Iteration 49, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 125, Action Source: Exploration\n", "Episode 41, Iteration 50, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 87.0), Reward: 2, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 41, Iteration 51, State: (4.0, 14.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 131, Action Source: Exploration\n", "Episode 41, Iteration 52, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 135, Action Source: Model Prediction\n", "Episode 41, Iteration 53, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 139, Action Source: Model Prediction\n", "Episode 41, Iteration 54, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 125.0, 86.0), Reward: 4, , Cumulative Score: 143, Action Source: Model Prediction\n", "Episode 41, Iteration 55, State: (3.0, 13.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 146, Action Source: Exploration\n", "Episode 41, Iteration 56, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode 41, Iteration 57, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 151, Action Source: Exploration\n", "Episode 41, Iteration 58, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, 
Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 153, Action Source: Exploration\n", "Episode 41, Iteration 59, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 155, Action Source: Exploration\n", "Episode 41, Iteration 60, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 156, Action Source: Exploration\n", "Episode: 41 Best Action: 0 Best evaluation action: 2\n", "Episode: 41 Score: 156 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 42, Iteration 1, State: (5.0, 14.0, 122.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 42, Iteration 2, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 42, Iteration 3, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 42, Iteration 4, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 42, Iteration 5, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 42, Iteration 6, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 42, Iteration 7, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 42, Iteration 8, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 42, Iteration 9, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 42, Iteration 10, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 42, Iteration 11, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 22, Action Source: Model Prediction\n", "Episode 42, Iteration 12, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 42, Iteration 13, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 42, Iteration 14, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 
90.0), Reward: 2, , Cumulative Score: 28, Action Source: Exploration\n", "Episode 42, Iteration 15, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 42, Iteration 16, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 42, Iteration 17, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 42, Iteration 18, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 36, Action Source: Exploration\n", "Episode 42, Iteration 19, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 42, Iteration 20, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 40, Action Source: Exploration\n", "Episode 42, Iteration 21, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 42, Iteration 22, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 42, Iteration 23, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 46, Action Source: Model Prediction\n", "Episode 42, Iteration 24, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 42, Iteration 25, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 50, Action Source: Exploration\n", "Episode 42, Iteration 26, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 52, Action Source: Model Prediction\n", "Episode 42, Iteration 27, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 42, Iteration 28, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 42, Iteration 29, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 42, Iteration 30, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 42, Iteration 31, State: (4.0, 14.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 63, Action Source: 
Exploration\n", "Episode 42, Iteration 32, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 42, Iteration 33, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 42, Iteration 34, State: (5.0, 15.0, 130.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 70, Action Source: Exploration\n", "Episode 42, Iteration 35, State: (5.0, 16.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 73, Action Source: Exploration\n", "Episode 42, Iteration 36, State: (5.0, 16.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 42, Iteration 37, State: (5.0, 16.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 42, Iteration 38, State: (5.0, 16.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 82, Action Source: Exploration\n", "Episode 42, Iteration 39, State: (5.0, 16.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 42, Iteration 40, State: (5.0, 16.0, 135.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 42, Iteration 41, State: (4.0, 15.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 90, Action Source: Exploration\n", "Episode 42, Iteration 42, State: (3.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 42, Iteration 43, State: (3.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 42, Iteration 44, State: (3.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 42, Iteration 45, State: (3.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 42, Iteration 46, State: (3.0, 14.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 42, Iteration 47, State: (3.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 42, Iteration 48, State: (3.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 42, Iteration 49, State: (3.0, 13.0, 
120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 114, Action Source: Exploration\n", "Episode 42, Iteration 50, State: (4.0, 14.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 42, Iteration 51, State: (3.0, 13.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 121, Action Source: Exploration\n", "Episode 42, Iteration 52, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 42, Iteration 53, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 42, Iteration 54, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 42, Iteration 55, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 42, Iteration 56, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 141, Action Source: Model Prediction\n", "Episode 42, Iteration 57, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 144, Action Source: Model Prediction\n", "Episode 42, Iteration 58, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 147, Action Source: Exploration\n", "Episode 42, Iteration 59, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 149, Action Source: Exploration\n", "Episode 42, Iteration 60, State: (3.0, 9.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 151, Action Source: Model Prediction\n", "Episode: 42 Best Action: 0 Best evaluation action: 0\n", "Episode: 42 Score: 151 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 43, Iteration 1, State: (3.0, 14.0, 116.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 43, Iteration 2, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 43, Iteration 3, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 43, Iteration 4, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 43, Iteration 5, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 
2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 19, Action Source: Exploration\n", "Episode 43, Iteration 6, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 23, Action Source: Exploration\n", "Episode 43, Iteration 7, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 43, Iteration 8, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 31, Action Source: Model Prediction\n", "Episode 43, Iteration 9, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 35, Action Source: Exploration\n", "Episode 43, Iteration 10, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 43, Iteration 11, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 42, Action Source: Exploration\n", "Episode 43, Iteration 12, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 43, Iteration 13, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 43, Iteration 14, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 43, Iteration 15, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 43, Iteration 16, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 57, Action Source: Exploration\n", "Episode 43, Iteration 17, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 58, Action Source: Exploration\n", "Episode 43, Iteration 18, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 43, Iteration 19, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 43, Iteration 20, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 43, Iteration 21, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 43, Iteration 22, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , 
Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 43, Iteration 23, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 43, Iteration 24, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 43, Iteration 25, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 43, Iteration 26, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 43, Iteration 27, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 43, Iteration 28, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 43, Iteration 29, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 90, Action Source: Exploration\n", "Episode 43, Iteration 30, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 43, Iteration 31, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 96, Action Source: Exploitation\n", "Episode 43, Iteration 32, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 43, Iteration 33, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 43, Iteration 34, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 43, Iteration 35, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 111, Action Source: Exploration\n", "Episode 43, Iteration 36, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 115, Action Source: Exploitation\n", "Episode 43, Iteration 37, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 43, Iteration 38, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 43, Iteration 39, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 127, Action Source: Model 
Prediction\n", "Episode 43, Iteration 40, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 131, Action Source: Exploitation\n", "Episode 43, Iteration 41, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 135, Action Source: Exploitation\n", "Episode 43, Iteration 42, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 139, Action Source: Model Prediction\n", "Episode 43, Iteration 43, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 143, Action Source: Model Prediction\n", "Episode 43, Iteration 44, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 147, Action Source: Exploration\n", "Episode 43, Iteration 45, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 151, Action Source: Exploration\n", "Episode 43, Iteration 46, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 155, Action Source: Model Prediction\n", "Episode 43, Iteration 47, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 159, Action Source: Model Prediction\n", "Episode 43, Iteration 48, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 163, Action Source: Model Prediction\n", "Episode 43, Iteration 49, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 167, Action Source: Exploitation\n", "Episode 43, Iteration 50, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 169, Action Source: Exploration\n", "Episode 43, Iteration 51, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 171, Action Source: Model Prediction\n", "Episode 43, Iteration 52, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 172, Action Source: Exploration\n", "Episode 43, Iteration 53, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 173, Action Source: Exploration\n", "Episode 43, Iteration 54, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 174, Action Source: Model Prediction\n", "Episode 43, Iteration 55, State: (5.0, 13.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 175, Action Source: Model Prediction\n", "Episode 43, Iteration 56, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 176, Action Source: Model Prediction\n", "Episode 43, Iteration 57, State: (4.0, 
12.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 177, Action Source: Model Prediction\n", "Episode 43, Iteration 58, State: (4.0, 12.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 179, Action Source: Exploration\n", "Episode 43, Iteration 59, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 181, Action Source: Exploration\n", "Episode 43, Iteration 60, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 183, Action Source: Model Prediction\n", "Episode: 43 Best Action: 1 Best evaluation action: 2\n", "Episode: 43 Score: 183 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 44, Iteration 1, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 44, Iteration 2, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 44, Iteration 3, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 44, Iteration 4, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 44, Iteration 5, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 44, Iteration 6, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 44, Iteration 7, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 44, Iteration 8, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 44, Iteration 9, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 44, Iteration 10, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 20, Action Source: Exploration\n", "Episode 44, Iteration 11, State: (5.0, 15.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 22, Action Source: Model Prediction\n", "Episode 44, Iteration 12, State: (5.0, 15.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 44, Iteration 13, State: (5.0, 15.0, 125.0, 91.0), Agent 
Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 44, Iteration 14, State: (5.0, 15.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 28, Action Source: Model Prediction\n", "Episode 44, Iteration 15, State: (5.0, 15.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 44, Iteration 16, State: (5.0, 15.0, 125.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 33, Action Source: Exploration\n", "Episode 44, Iteration 17, State: (5.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 44, Iteration 18, State: (5.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 44, Iteration 19, State: (5.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 44, Iteration 20, State: (5.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 92.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 44, Iteration 21, State: (5.0, 16.0, 130.0, 92.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 47, Action Source: Exploration\n", "Episode 44, Iteration 22, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 49, Action Source: Model Prediction\n", "Episode 44, Iteration 23, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 44, Iteration 24, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 53, Action Source: Model Prediction\n", "Episode 44, Iteration 25, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 44, Iteration 26, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 44, Iteration 27, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 59, Action Source: Model Prediction\n", "Episode 44, Iteration 28, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 61, Action Source: Model Prediction\n", "Episode 44, Iteration 29, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 44, Iteration 30, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 
17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 44, Iteration 31, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 44, Iteration 32, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 44, Iteration 33, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 44, Iteration 34, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 94.0), Reward: 2, , Cumulative Score: 73, Action Source: Exploration\n", "Episode 44, Iteration 35, State: (5.0, 18.0, 140.0, 94.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 94.0), Reward: 2, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 44, Iteration 36, State: (5.0, 18.0, 140.0, 94.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 44, Iteration 37, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 44, Iteration 38, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 44, Iteration 39, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 44, Iteration 40, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 44, Iteration 41, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 44, Iteration 42, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 44, Iteration 43, State: (3.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 44, Iteration 44, State: (3.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 44, Iteration 45, State: (3.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 44, Iteration 46, State: (3.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 44, Iteration 47, State: (3.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 
93, Action Source: Model Prediction\n", "Episode 44, Iteration 48, State: (3.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 44, Iteration 49, State: (3.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 44, Iteration 50, State: (3.0, 16.0, 130.0, 92.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 44, Iteration 51, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 100, Action Source: Exploration\n", "Episode 44, Iteration 52, State: (3.0, 16.0, 130.0, 92.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 44, Iteration 53, State: (3.0, 15.0, 125.0, 91.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 44, Iteration 54, State: (4.0, 16.0, 130.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 92.0), Reward: 2, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 44, Iteration 55, State: (4.0, 16.0, 130.0, 92.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 93.0), Reward: 2, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 44, Iteration 56, State: (5.0, 17.0, 135.0, 93.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 94.0), Reward: 2, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 44, Iteration 57, State: (5.0, 18.0, 140.0, 94.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 111, Action Source: Exploration\n", "Episode 44, Iteration 58, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 44, Iteration 59, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 44, Iteration 60, State: (4.0, 17.0, 135.0, 93.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 93.0), Reward: 1, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode: 44 Best Action: 2 Best evaluation action: 2\n", "Episode: 44 Score: 114 Best Reward: 3 Gsize State: 5.0 Bratio State: 1:16.0 Btime State: 130.0 convert: 2 minutes 10 seconds Temperature State: 92.0 Best Action Source: Exploration\n", "Episode 45, Iteration 1, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 45, Iteration 2, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 45, Iteration 3, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9, Action Source: Model 
Prediction\n", "Episode 45, Iteration 4, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 45, Iteration 5, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 45, Iteration 6, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 45, Iteration 7, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 45, Iteration 8, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 45, Iteration 9, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 45, Iteration 10, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 45, Iteration 11, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 45, Iteration 12, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 45, Iteration 13, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 45, Iteration 14, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 45, Iteration 15, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 45, Iteration 16, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 45, Iteration 17, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 45, Iteration 18, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 45, Iteration 19, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 45, Iteration 20, State: (4.0, 16.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 45, Iteration 21, State: (3.0, 
15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 45, Iteration 22, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 45, Iteration 23, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 45, Iteration 24, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 45, Iteration 25, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 45, Iteration 26, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 45, Iteration 27, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 45, Iteration 28, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 84, Action Source: Exploration\n", "Episode 45, Iteration 29, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 45, Iteration 30, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 45, Iteration 31, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 45, Iteration 32, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 45, Iteration 33, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 45, Iteration 34, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 45, Iteration 35, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 45, Iteration 36, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 45, Iteration 37, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 45, Iteration 38, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, 
Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 45, Iteration 39, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 45, Iteration 40, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 120, Action Source: Exploration\n", "Episode 45, Iteration 41, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 45, Iteration 42, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 126, Action Source: Exploration\n", "Episode 45, Iteration 43, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 45, Iteration 44, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode 45, Iteration 45, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 136, Action Source: Exploration\n", "Episode 45, Iteration 46, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 140, Action Source: Model Prediction\n", "Episode 45, Iteration 47, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 144, Action Source: Model Prediction\n", "Episode 45, Iteration 48, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 147, Action Source: Exploration\n", "Episode 45, Iteration 49, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 150, Action Source: Model Prediction\n", "Episode 45, Iteration 50, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 153, Action Source: Model Prediction\n", "Episode 45, Iteration 51, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 156, Action Source: Model Prediction\n", "Episode 45, Iteration 52, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 159, Action Source: Model Prediction\n", "Episode 45, Iteration 53, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 162, Action Source: Model Prediction\n", "Episode 45, Iteration 54, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 165, Action Source: Model Prediction\n", "Episode 45, Iteration 55, State: (4.0, 15.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 
14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 169, Action Source: Exploration\n", "Episode 45, Iteration 56, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 173, Action Source: Model Prediction\n", "Episode 45, Iteration 57, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 177, Action Source: Exploitation\n", "Episode 45, Iteration 58, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 181, Action Source: Model Prediction\n", "Episode 45, Iteration 59, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 185, Action Source: Exploration\n", "Episode 45, Iteration 60, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 189, Action Source: Model Prediction\n", "Episode: 45 Best Action: 0 Best evaluation action: 0\n", "Episode: 45 Score: 189 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 46, Iteration 1, State: (4.0, 15.0, 117.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 46, Iteration 2, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 46, Iteration 3, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 46, Iteration 4, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 46, Iteration 5, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 46, Iteration 6, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 46, Iteration 7, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 91.0), Reward: 3, , Cumulative Score: 21, Action Source: Exploration\n", "Episode 46, Iteration 8, State: (5.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 91.0), Reward: 3, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 46, Iteration 9, State: (5.0, 16.0, 125.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 91.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 46, Iteration 10, State: (5.0, 16.0, 125.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 30, Action Source: Exploration\n", "Episode 46, Iteration 11, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 
3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 46, Iteration 12, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 46, Iteration 13, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 46, Iteration 14, State: (4.0, 15.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 42, Action Source: Exploration\n", "Episode 46, Iteration 15, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 46, Iteration 16, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 46, Iteration 17, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 51, Action Source: Exploration\n", "Episode 46, Iteration 18, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 54, Action Source: Exploration\n", "Episode 46, Iteration 19, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 57, Action Source: Exploration\n", "Episode 46, Iteration 20, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 46, Iteration 21, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 46, Iteration 22, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 46, Iteration 23, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 46, Iteration 24, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 72, Action Source: Exploration\n", "Episode 46, Iteration 25, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 46, Iteration 26, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 78, Action Source: Exploration\n", "Episode 46, Iteration 27, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 46, Iteration 28, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 84, Action Source: Model Prediction\n", 
"Episode 46, Iteration 29, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 46, Iteration 30, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 46, Iteration 31, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 46, Iteration 32, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 46, Iteration 33, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 46, Iteration 34, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 46, Iteration 35, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 46, Iteration 36, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 46, Iteration 37, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 46, Iteration 38, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 115, Action Source: Exploration\n", "Episode 46, Iteration 39, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 46, Iteration 40, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 46, Iteration 41, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 127, Action Source: Exploration\n", "Episode 46, Iteration 42, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 131, Action Source: Exploration\n", "Episode 46, Iteration 43, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 135, Action Source: Model Prediction\n", "Episode 46, Iteration 44, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 138, Action Source: Exploration\n", "Episode 46, Iteration 45, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 141, Action Source: Model Prediction\n", "Episode 46, Iteration 46, State: (3.0, 11.0, 120.0, 
86.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 142, Action Source: Exploration\n", "Episode 46, Iteration 47, State: (4.0, 12.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 143, Action Source: Model Prediction\n", "Episode 46, Iteration 48, State: (4.0, 12.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 146, Action Source: Exploration\n", "Episode 46, Iteration 49, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode 46, Iteration 50, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 152, Action Source: Model Prediction\n", "Episode 46, Iteration 51, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 155, Action Source: Model Prediction\n", "Episode 46, Iteration 52, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 156, Action Source: Exploration\n", "Episode 46, Iteration 53, State: (4.0, 12.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 157, Action Source: Exploration\n", "Episode 46, Iteration 54, State: (4.0, 12.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 158, Action Source: Model Prediction\n", "Episode 46, Iteration 55, State: (4.0, 12.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 159, Action Source: Model Prediction\n", "Episode 46, Iteration 56, State: (4.0, 12.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 160, Action Source: Model Prediction\n", "Episode 46, Iteration 57, State: (4.0, 12.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 161, Action Source: Model Prediction\n", "Episode 46, Iteration 58, State: (4.0, 12.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 162, Action Source: Exploration\n", "Episode 46, Iteration 59, State: (4.0, 12.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 165, Action Source: Exploration\n", "Episode 46, Iteration 60, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 168, Action Source: Model Prediction\n", "Episode: 46 Best Action: 0 Best evaluation action: 0\n", "Episode: 46 Score: 168 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 47, Iteration 1, State: (5.0, 14.0, 117.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 47, Iteration 2, State: (5.0, 14.0, 120.0, 90.0), Agent Action: 
0, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 47, Iteration 3, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 47, Iteration 4, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 47, Iteration 5, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 10, Action Source: Exploration\n", "Episode 47, Iteration 6, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 47, Iteration 7, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 47, Iteration 8, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 47, Iteration 9, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 47, Iteration 10, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 47, Iteration 11, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 22, Action Source: Model Prediction\n", "Episode 47, Iteration 12, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 47, Iteration 13, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 47, Iteration 14, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 28, Action Source: Exploration\n", "Episode 47, Iteration 15, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 30, Action Source: Exploration\n", "Episode 47, Iteration 16, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 47, Iteration 17, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 47, Iteration 18, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 47, Iteration 19, State: (5.0, 14.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), 
Reward: 2, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 47, Iteration 20, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 47, Iteration 21, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 47, Iteration 22, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 47, Iteration 23, State: (4.0, 13.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 47, Iteration 24, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 52, Action Source: Model Prediction\n", "Episode 47, Iteration 25, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 47, Iteration 26, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 47, Iteration 27, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 64, Action Source: Exploration\n", "Episode 47, Iteration 28, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 47, Iteration 29, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 47, Iteration 30, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 47, Iteration 31, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 79, Action Source: Exploitation\n", "Episode 47, Iteration 32, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 82, Action Source: Exploration\n", "Episode 47, Iteration 33, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 47, Iteration 34, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 47, Iteration 35, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 47, Iteration 36, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 94, Action Source: 
Model Prediction\n", "Episode 47, Iteration 37, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 95, Action Source: Exploration\n", "Episode 47, Iteration 38, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 47, Iteration 39, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 47, Iteration 40, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 11.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 47, Iteration 41, State: (4.0, 11.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 101, Action Source: Exploration\n", "Episode 47, Iteration 42, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 47, Iteration 43, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 47, Iteration 44, State: (3.0, 9.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 47, Iteration 45, State: (3.0, 9.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 47, Iteration 46, State: (3.0, 9.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 111, Action Source: Exploration\n", "Episode 47, Iteration 47, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 47, Iteration 48, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 47, Iteration 49, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 47, Iteration 50, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 47, Iteration 51, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 47, Iteration 52, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 47, Iteration 53, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 47, Iteration 54, 
State: (3.0, 9.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 47, Iteration 55, State: (3.0, 9.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 47, Iteration 56, State: (3.0, 9.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 124, Action Source: Exploration\n", "Episode 47, Iteration 57, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 47, Iteration 58, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 11.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 126, Action Source: Exploration\n", "Episode 47, Iteration 59, State: (5.0, 11.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 127, Action Source: Exploration\n", "Episode 47, Iteration 60, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode: 47 Best Action: 0 Best evaluation action: 2\n", "Episode: 47 Score: 128 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Model Prediction\n", "Episode 48, Iteration 1, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 48, Iteration 2, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 48, Iteration 3, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 10, Action Source: Exploration\n", "Episode 48, Iteration 4, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 48, Iteration 5, State: (4.0, 15.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 16, Action Source: Exploration\n", "Episode 48, Iteration 6, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 20, Action Source: Exploration\n", "Episode 48, Iteration 7, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 48, Iteration 8, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 28, Action Source: Model Prediction\n", "Episode 48, Iteration 9, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 48, Iteration 10, State: (3.0, 14.0, 120.0, 86.0), Agent 
Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 36, Action Source: Exploitation\n", "Episode 48, Iteration 11, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 48, Iteration 12, State: (3.0, 14.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 43, Action Source: Exploration\n", "Episode 48, Iteration 13, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 46, Action Source: Model Prediction\n", "Episode 48, Iteration 14, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 48, Action Source: Exploration\n", "Episode 48, Iteration 15, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 49, Action Source: Exploration\n", "Episode 48, Iteration 16, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 48, Iteration 17, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 48, Iteration 18, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 52, Action Source: Model Prediction\n", "Episode 48, Iteration 19, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 53, Action Source: Model Prediction\n", "Episode 48, Iteration 20, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 48, Iteration 21, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 48, Iteration 22, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 87.0), Reward: 1, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 48, Iteration 23, State: (5.0, 15.0, 130.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 58, Action Source: Exploration\n", "Episode 48, Iteration 24, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 48, Iteration 25, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 86.0), Reward: 2, , Cumulative Score: 62, Action Source: Model Prediction\n", "Episode 48, Iteration 26, State: (4.0, 14.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 48, Iteration 27, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 
85.0), Reward: 3, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 48, Iteration 28, State: (3.0, 13.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 71, Action Source: Exploration\n", "Episode 48, Iteration 29, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 48, Iteration 30, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 48, Iteration 31, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 48, Iteration 32, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 83, Action Source: Exploration\n", "Episode 48, Iteration 33, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 84.0), Reward: 3, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 48, Iteration 34, State: (3.0, 12.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 88, Action Source: Exploration\n", "Episode 48, Iteration 35, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 48, Iteration 36, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 48, Iteration 37, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 94, Action Source: Model Prediction\n", "Episode 48, Iteration 38, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 95, Action Source: Exploration\n", "Episode 48, Iteration 39, State: (4.0, 12.0, 125.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 48, Iteration 40, State: (4.0, 12.0, 125.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 48, Iteration 41, State: (4.0, 12.0, 125.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 48, Iteration 42, State: (4.0, 12.0, 125.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 48, Iteration 43, State: (4.0, 12.0, 125.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 84.0), Reward: 1, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 48, Iteration 44, State: (4.0, 12.0, 125.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 102, Action 
Source: Exploration\n", "Episode 48, Iteration 45, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 104, Action Source: Exploration\n", "Episode 48, Iteration 46, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 48, Iteration 47, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 48, Iteration 48, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 48, Iteration 49, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 48, Iteration 50, State: (3.0, 11.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 114, Action Source: Exploration\n", "Episode 48, Iteration 51, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 48, Iteration 52, State: (3.0, 10.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 48, Iteration 53, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 48, Iteration 54, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 48, Iteration 55, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 48, Iteration 56, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 48, Iteration 57, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 48, Iteration 58, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 48, Iteration 59, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode 48, Iteration 60, State: (3.0, 9.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 134, Action Source: Exploration\n", "Episode: 48 Best Action: 0 Best evaluation action: 0\n", "Episode: 48 Score: 134 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0 Best Action Source: Exploration\n", 
"Episode 49, Iteration 1, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 49, Iteration 2, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 49, Iteration 3, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 49, Iteration 4, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 49, Iteration 5, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 11, Action Source: Exploration\n", "Episode 49, Iteration 6, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 14, Action Source: Exploration\n", "Episode 49, Iteration 7, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 17, Action Source: Model Prediction\n", "Episode 49, Iteration 8, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 49, Iteration 9, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 49, Iteration 10, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 26, Action Source: Exploration\n", "Episode 49, Iteration 11, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 29, Action Source: Model Prediction\n", "Episode 49, Iteration 12, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 49, Iteration 13, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 36, Action Source: Exploration\n", "Episode 49, Iteration 14, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 49, Iteration 15, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 44, Action Source: Exploration\n", "Episode 49, Iteration 16, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 49, Iteration 17, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 52, Action Source: Exploitation\n", "Episode 49, Iteration 18, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, 
Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 49, Iteration 19, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 49, Iteration 20, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 63, Action Source: Exploration\n", "Episode 49, Iteration 21, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 49, Iteration 22, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 49, Iteration 23, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 69, Action Source: Model Prediction\n", "Episode 49, Iteration 24, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 49, Iteration 25, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 72, Action Source: Exploration\n", "Episode 49, Iteration 26, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 49, Iteration 27, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 76, Action Source: Exploration\n", "Episode 49, Iteration 28, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 77, Action Source: Exploration\n", "Episode 49, Iteration 29, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 49, Iteration 30, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 79, Action Source: Exploration\n", "Episode 49, Iteration 31, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 80, Action Source: Exploration\n", "Episode 49, Iteration 32, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 87.0), Reward: 1, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 49, Iteration 33, State: (5.0, 14.0, 135.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 49, Iteration 34, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 49, Iteration 35, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 125.0, 85.0), Reward: 3, , 
Cumulative Score: 86, Action Source: Exploration\n", "Episode 49, Iteration 36, State: (3.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 125.0, 85.0), Reward: 3, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 49, Iteration 37, State: (3.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 125.0, 85.0), Reward: 3, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 49, Iteration 38, State: (3.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 125.0, 85.0), Reward: 3, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 49, Iteration 39, State: (3.0, 12.0, 125.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 97, Action Source: Exploration\n", "Episode 49, Iteration 40, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 49, Iteration 41, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 49, Iteration 42, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 49, Iteration 43, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 49, Iteration 44, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 102, Action Source: Model Prediction\n", "Episode 49, Iteration 45, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 49, Iteration 46, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 49, Iteration 47, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 105, Action Source: Model Prediction\n", "Episode 49, Iteration 48, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 49, Iteration 49, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 49, Iteration 50, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 49, Iteration 51, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 109, Action Source: Exploration\n", "Episode 49, Iteration 52, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 110, Action Source: Model 
Prediction\n", "Episode 49, Iteration 53, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 85.0), Reward: 1, , Cumulative Score: 111, Action Source: Exploration\n", "Episode 49, Iteration 54, State: (4.0, 12.0, 125.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 112, Action Source: Exploration\n", "Episode 49, Iteration 55, State: (5.0, 13.0, 130.0, 86.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 87.0), Reward: 1, , Cumulative Score: 113, Action Source: Exploration\n", "Episode 49, Iteration 56, State: (5.0, 14.0, 135.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 87.0), Reward: 1, , Cumulative Score: 114, Action Source: Exploration\n", "Episode 49, Iteration 57, State: (5.0, 14.0, 135.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 87.0), Reward: 1, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 49, Iteration 58, State: (5.0, 14.0, 135.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 49, Iteration 59, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 49, Iteration 60, State: (4.0, 13.0, 130.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 86.0), Reward: 1, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode: 49 Best Action: 0 Best evaluation action: 0\n", "Episode: 49 Score: 118 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 50, Iteration 1, State: (4.0, 14.0, 122.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 50, Iteration 2, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 50, Iteration 3, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 50, Iteration 4, State: (4.0, 14.0, 120.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 11, Action Source: Exploration\n", "Episode 50, Iteration 5, State: (5.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 14, Action Source: Exploration\n", "Episode 50, Iteration 6, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 17, Action Source: Model Prediction\n", "Episode 50, Iteration 7, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 20, Action Source: Exploration\n", "Episode 50, Iteration 8, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 50, Iteration 9, 
State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 50, Iteration 10, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 50, Iteration 11, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 50, Iteration 12, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 35, Action Source: Model Prediction\n", "Episode 50, Iteration 13, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 50, Iteration 14, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 41, Action Source: Model Prediction\n", "Episode 50, Iteration 15, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 44, Action Source: Exploration\n", "Episode 50, Iteration 16, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 50, Iteration 17, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 50, Iteration 18, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 53, Action Source: Model Prediction\n", "Episode 50, Iteration 19, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 50, Iteration 20, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 59, Action Source: Exploration\n", "Episode 50, Iteration 21, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 62, Action Source: Exploration\n", "Episode 50, Iteration 22, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 65, Action Source: Exploration\n", "Episode 50, Iteration 23, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 68, Action Source: Exploration\n", "Episode 50, Iteration 24, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 50, Iteration 25, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 74, Action Source: Exploration\n", "Episode 50, Iteration 26, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next 
State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 50, Iteration 27, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 50, Iteration 28, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 50, Iteration 29, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 86, Action Source: Model Prediction\n", "Episode 50, Iteration 30, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 50, Iteration 31, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 50, Iteration 32, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 50, Iteration 33, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 50, Iteration 34, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 101, Action Source: Exploration\n", "Episode 50, Iteration 35, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 50, Iteration 36, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 50, Iteration 37, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 50, Iteration 38, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 50, Iteration 39, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 50, Iteration 40, State: (4.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 50, Iteration 41, State: (5.0, 16.0, 130.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 122, Action Source: Exploration\n", "Episode 50, Iteration 42, State: (5.0, 17.0, 135.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 50, Iteration 43, State: (5.0, 17.0, 135.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), 
Reward: 2, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 50, Iteration 44, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 50, Iteration 45, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 50, Iteration 46, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 50, Iteration 47, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 135, Action Source: Model Prediction\n", "Episode 50, Iteration 48, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 50, Iteration 49, State: (3.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 139, Action Source: Model Prediction\n", "Episode 50, Iteration 50, State: (3.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 142, Action Source: Exploration\n", "Episode 50, Iteration 51, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 146, Action Source: Exploration\n", "Episode 50, Iteration 52, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 149, Action Source: Exploration\n", "Episode 50, Iteration 53, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 152, Action Source: Model Prediction\n", "Episode 50, Iteration 54, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 155, Action Source: Model Prediction\n", "Episode 50, Iteration 55, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 157, Action Source: Exploration\n", "Episode 50, Iteration 56, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 160, Action Source: Exploration\n", "Episode 50, Iteration 57, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 162, Action Source: Exploration\n", "Episode 50, Iteration 58, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 164, Action Source: Model Prediction\n", "Episode 50, Iteration 59, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 166, Action Source: Model Prediction\n", "Episode 50, Iteration 60, State: (5.0, 15.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 168, Action 
Source: Model Prediction\n", "Episode: 50 Best Action: 0 Best evaluation action: 0\n", "Episode: 50 Score: 168 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 51, Iteration 1, State: (3.0, 15.0, 124.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 2, Action Source: Model Prediction\n", "Episode 51, Iteration 2, State: (3.0, 15.0, 125.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 51, Iteration 3, State: (3.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 51, Iteration 4, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 51, Iteration 5, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 10, Action Source: Exploration\n", "Episode 51, Iteration 6, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 51, Iteration 7, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 14, Action Source: Model Prediction\n", "Episode 51, Iteration 8, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 16, Action Source: Exploration\n", "Episode 51, Iteration 9, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 51, Iteration 10, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 20, Action Source: Model Prediction\n", "Episode 51, Iteration 11, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 22, Action Source: Exploration\n", "Episode 51, Iteration 12, State: (3.0, 15.0, 125.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 24, Action Source: Exploration\n", "Episode 51, Iteration 13, State: (4.0, 16.0, 130.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 26, Action Source: Exploration\n", "Episode 51, Iteration 14, State: (3.0, 15.0, 125.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 51, Iteration 15, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 51, Iteration 16, State: (3.0, 14.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 36, Action Source: Exploration\n", "Episode 51, Iteration 17, 
State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 51, Iteration 18, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 44, Action Source: Exploration\n", "Episode 51, Iteration 19, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 51, Iteration 20, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 52, Action Source: Model Prediction\n", "Episode 51, Iteration 21, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 51, Iteration 22, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 51, Iteration 23, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 64, Action Source: Exploration\n", "Episode 51, Iteration 24, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 51, Iteration 25, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 72, Action Source: Exploration\n", "Episode 51, Iteration 26, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 51, Iteration 27, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 51, Iteration 28, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 51, Iteration 29, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 51, Iteration 30, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 92, Action Source: Exploitation\n", "Episode 51, Iteration 31, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 96, Action Source: Exploitation\n", "Episode 51, Iteration 32, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 100, Action Source: Exploitation\n", "Episode 51, Iteration 33, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 51, Iteration 34, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation 
Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 51, Iteration 35, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 51, Iteration 36, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 51, Iteration 37, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 114, Action Source: Exploitation\n", "Episode 51, Iteration 38, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 118, Action Source: Exploitation\n", "Episode 51, Iteration 39, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 122, Action Source: Exploitation\n", "Episode 51, Iteration 40, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 126, Action Source: Exploitation\n", "Episode 51, Iteration 41, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 51, Iteration 42, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 134, Action Source: Exploitation\n", "Episode 51, Iteration 43, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 138, Action Source: Model Prediction\n", "Episode 51, Iteration 44, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 142, Action Source: Exploitation\n", "Episode 51, Iteration 45, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 146, Action Source: Model Prediction\n", "Episode 51, Iteration 46, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 150, Action Source: Exploitation\n", "Episode 51, Iteration 47, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 154, Action Source: Exploitation\n", "Episode 51, Iteration 48, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 158, Action Source: Exploitation\n", "Episode 51, Iteration 49, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 160, Action Source: Exploitation\n", "Episode 51, Iteration 50, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 161, Action Source: Exploration\n", "Episode 51, Iteration 51, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , 
Cumulative Score: 162, Action Source: Exploration\n", "Episode 51, Iteration 52, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 163, Action Source: Model Prediction\n", "Episode 51, Iteration 53, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 164, Action Source: Model Prediction\n", "Episode 51, Iteration 54, State: (5.0, 14.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 166, Action Source: Model Prediction\n", "Episode 51, Iteration 55, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 168, Action Source: Model Prediction\n", "Episode 51, Iteration 56, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 170, Action Source: Model Prediction\n", "Episode 51, Iteration 57, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 172, Action Source: Model Prediction\n", "Episode 51, Iteration 58, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 174, Action Source: Exploration\n", "Episode 51, Iteration 59, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 176, Action Source: Model Prediction\n", "Episode 51, Iteration 60, State: (4.0, 13.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 178, Action Source: Model Prediction\n", "Episode: 51 Best Action: 0 Best evaluation action: 0\n", "Episode: 51 Score: 178 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 52, Iteration 1, State: (5.0, 14.0, 117.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 120.0, 89.0), Reward: 1, , Cumulative Score: 1, Action Source: Exploration\n", "Episode 52, Iteration 2, State: (5.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 52, Iteration 3, State: (4.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 7, Action Source: Model Prediction\n", "Episode 52, Iteration 4, State: (4.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 52, Iteration 5, State: (4.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 11, Action Source: Exploration\n", "Episode 52, Iteration 6, State: (5.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 14, Action Source: Exploration\n", "Episode 52, Iteration 7, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 17, Action 
Source: Model Prediction\n", "Episode 52, Iteration 8, State: (5.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 20, Action Source: Exploration\n", "Episode 52, Iteration 9, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 23, Action Source: Exploration\n", "Episode 52, Iteration 10, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 52, Iteration 11, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 29, Action Source: Exploration\n", "Episode 52, Iteration 12, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 52, Iteration 13, State: (5.0, 17.0, 135.0, 91.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 35, Action Source: Exploration\n", "Episode 52, Iteration 14, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 52, Iteration 15, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 52, Iteration 16, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 52, Iteration 17, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 41, Action Source: Exploration\n", "Episode 52, Iteration 18, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 52, Iteration 19, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 52, Iteration 20, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 52, Iteration 21, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 52, Iteration 22, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 46, Action Source: Model Prediction\n", "Episode 52, Iteration 23, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 47, Action Source: Model Prediction\n", "Episode 52, Iteration 24, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 52, Iteration 25, State: 
(4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 49, Action Source: Model Prediction\n", "Episode 52, Iteration 26, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 50, Action Source: Exploration\n", "Episode 52, Iteration 27, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 52, Iteration 28, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 54, Action Source: Exploration\n", "Episode 52, Iteration 29, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 52, Iteration 30, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 52, Iteration 31, State: (5.0, 18.0, 140.0, 92.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 61, Action Source: Model Prediction\n", "Episode 52, Iteration 32, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 62, Action Source: Model Prediction\n", "Episode 52, Iteration 33, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 52, Iteration 34, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 52, Iteration 35, State: (4.0, 17.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 66, Action Source: Exploration\n", "Episode 52, Iteration 36, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 52, Iteration 37, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 70, Action Source: Model Prediction\n", "Episode 52, Iteration 38, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 52, Iteration 39, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 74, Action Source: Model Prediction\n", "Episode 52, Iteration 40, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 52, Iteration 41, State: (3.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 78, Action Source: Exploration\n", "Episode 52, Iteration 42, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 
0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 80, Action Source: Model Prediction\n", "Episode 52, Iteration 43, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 82, Action Source: Model Prediction\n", "Episode 52, Iteration 44, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 52, Iteration 45, State: (4.0, 16.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 87, Action Source: Exploration\n", "Episode 52, Iteration 46, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 52, Iteration 47, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 52, Iteration 48, State: (3.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 95, Action Source: Exploration\n", "Episode 52, Iteration 49, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 52, Iteration 50, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 52, Iteration 51, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 52, Iteration 52, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 52, Iteration 53, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 112, Action Source: Exploration\n", "Episode 52, Iteration 54, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 52, Iteration 55, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 52, Iteration 56, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 124, Action Source: Exploitation\n", "Episode 52, Iteration 57, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 52, Iteration 58, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 131, Action Source: Exploration\n", "Episode 52, Iteration 59, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative 
Score: 135, Action Source: Exploration\n", "Episode 52, Iteration 60, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 139, Action Source: Model Prediction\n", "Episode: 52 Best Action: 0 Best evaluation action: 0\n", "Episode: 52 Score: 139 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 53, Iteration 1, State: (4.0, 14.0, 122.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 53, Iteration 2, State: (4.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 53, Iteration 3, State: (4.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 10, Action Source: Exploration\n", "Episode 53, Iteration 4, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 13, Action Source: Exploration\n", "Episode 53, Iteration 5, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 53, Iteration 6, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 19, Action Source: Exploration\n", "Episode 53, Iteration 7, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 22, Action Source: Model Prediction\n", "Episode 53, Iteration 8, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 25, Action Source: Model Prediction\n", "Episode 53, Iteration 9, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 28, Action Source: Model Prediction\n", "Episode 53, Iteration 10, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 31, Action Source: Exploration\n", "Episode 53, Iteration 11, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 32, Action Source: Exploration\n", "Episode 53, Iteration 12, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 53, Iteration 13, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 53, Iteration 14, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 37, Action Source: Exploration\n", "Episode 53, Iteration 15, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 38, Action Source: Exploration\n", 
"Episode 53, Iteration 16, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 53, Iteration 17, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 53, Iteration 18, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 43, Action Source: Exploration\n", "Episode 53, Iteration 19, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 44, Action Source: Exploration\n", "Episode 53, Iteration 20, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 45, Action Source: Model Prediction\n", "Episode 53, Iteration 21, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 48, Action Source: Exploration\n", "Episode 53, Iteration 22, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 53, Iteration 23, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 53, Iteration 24, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 53, Iteration 25, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 53, Iteration 26, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 53, Iteration 27, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 53, Iteration 28, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 53, Iteration 29, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 53, Iteration 30, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 53, Iteration 31, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 53, Iteration 32, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 53, Iteration 33, State: (4.0, 14.0, 125.0, 
88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 53, Iteration 34, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 53, Iteration 35, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 53, Iteration 36, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 53, Iteration 37, State: (4.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 94, Action Source: Exploitation\n", "Episode 53, Iteration 38, State: (5.0, 15.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 97, Action Source: Exploration\n", "Episode 53, Iteration 39, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 53, Iteration 40, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 53, Iteration 41, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 53, Iteration 42, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 53, Iteration 43, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 53, Iteration 44, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 53, Iteration 45, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 53, Iteration 46, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 53, Iteration 47, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 53, Iteration 48, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 127, Action Source: Exploration\n", "Episode 53, Iteration 49, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 53, Iteration 50, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation 
Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 53, Iteration 51, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 136, Action Source: Model Prediction\n", "Episode 53, Iteration 52, State: (4.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 139, Action Source: Model Prediction\n", "Episode 53, Iteration 53, State: (4.0, 15.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 143, Action Source: Exploration\n", "Episode 53, Iteration 54, State: (3.0, 14.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 146, Action Source: Exploration\n", "Episode 53, Iteration 55, State: (4.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode 53, Iteration 56, State: (4.0, 15.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 89.0), Reward: 3, , Cumulative Score: 152, Action Source: Model Prediction\n", "Episode 53, Iteration 57, State: (4.0, 15.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 155, Action Source: Exploration\n", "Episode 53, Iteration 58, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 158, Action Source: Exploration\n", "Episode 53, Iteration 59, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 161, Action Source: Model Prediction\n", "Episode 53, Iteration 60, State: (5.0, 16.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 164, Action Source: Model Prediction\n", "Episode: 53 Best Action: 0 Best evaluation action: 2\n", "Episode: 53 Score: 164 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 54, Iteration 1, State: (3.0, 16.0, 116.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 54, Iteration 2, State: (4.0, 17.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 54, Iteration 3, State: (4.0, 17.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Exploration\n", "Episode 54, Iteration 4, State: (4.0, 17.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 54, Iteration 5, State: (3.0, 16.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 12, Action Source: Exploration\n", "Episode 54, Iteration 6, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 
120.0, 87.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 54, Iteration 7, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 54, Iteration 8, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 54, Iteration 9, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 54, Iteration 10, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 54, Iteration 11, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 54, Iteration 12, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 33, Action Source: Model Prediction\n", "Episode 54, Iteration 13, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 54, Iteration 14, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 39, Action Source: Model Prediction\n", "Episode 54, Iteration 15, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 54, Iteration 16, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 45, Action Source: Exploration\n", "Episode 54, Iteration 17, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 54, Iteration 18, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 54, Iteration 19, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 54, Iteration 20, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 54, Iteration 21, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 54, Iteration 22, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 54, Iteration 23, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative 
Score: 66, Action Source: Model Prediction\n", "Episode 54, Iteration 24, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 54, Iteration 25, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 54, Iteration 26, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 54, Iteration 27, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 78, Action Source: Model Prediction\n", "Episode 54, Iteration 28, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 54, Iteration 29, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 54, Iteration 30, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 54, Iteration 31, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 90, Action Source: Model Prediction\n", "Episode 54, Iteration 32, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 54, Iteration 33, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 54, Iteration 34, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 98, Action Source: Exploration\n", "Episode 54, Iteration 35, State: (5.0, 17.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 54, Iteration 36, State: (5.0, 17.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 103, Action Source: Exploration\n", "Episode 54, Iteration 37, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 105, Action Source: Exploration\n", "Episode 54, Iteration 38, State: (5.0, 17.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 54, Iteration 39, State: (5.0, 17.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 54, Iteration 40, State: (5.0, 17.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 112, Action Source: Exploration\n", "Episode 54, 
Iteration 41, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 115, Action Source: Exploration\n", "Episode 54, Iteration 42, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 54, Iteration 43, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 54, Iteration 44, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 54, Iteration 45, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 127, Action Source: Exploration\n", "Episode 54, Iteration 46, State: (3.0, 15.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 130, Action Source: Exploration\n", "Episode 54, Iteration 47, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 54, Iteration 48, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 136, Action Source: Model Prediction\n", "Episode 54, Iteration 49, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 139, Action Source: Model Prediction\n", "Episode 54, Iteration 50, State: (4.0, 16.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 141, Action Source: Exploration\n", "Episode 54, Iteration 51, State: (5.0, 17.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 143, Action Source: Model Prediction\n", "Episode 54, Iteration 52, State: (5.0, 17.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 146, Action Source: Exploration\n", "Episode 54, Iteration 53, State: (5.0, 18.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode 54, Iteration 54, State: (5.0, 18.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 152, Action Source: Model Prediction\n", "Episode 54, Iteration 55, State: (5.0, 18.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 155, Action Source: Model Prediction\n", "Episode 54, Iteration 56, State: (5.0, 18.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 158, Action Source: Exploration\n", "Episode 54, Iteration 57, State: (5.0, 18.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 161, Action Source: Model Prediction\n", "Episode 54, Iteration 58, State: (5.0, 18.0, 135.0, 90.0), 
Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 164, Action Source: Exploration\n", "Episode 54, Iteration 59, State: (5.0, 18.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 167, Action Source: Model Prediction\n", "Episode 54, Iteration 60, State: (5.0, 18.0, 135.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 170, Action Source: Model Prediction\n", "Episode: 54 Best Action: 0 Best evaluation action: 0\n", "Episode: 54 Score: 170 Best Reward: 3 Gsize State: 3.0 Bratio State: 1:16.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 55, Iteration 1, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 55, Iteration 2, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 55, Iteration 3, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 55, Iteration 4, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 55, Iteration 5, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 55, Iteration 6, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 55, Iteration 7, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 55, Iteration 8, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 22, Action Source: Exploration\n", "Episode 55, Iteration 9, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 26, Action Source: Model Prediction\n", "Episode 55, Iteration 10, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 55, Iteration 11, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 55, Iteration 12, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 38, Action Source: Exploration\n", "Episode 55, Iteration 13, State: (3.0, 14.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 42, Action Source: Exploitation\n", "Episode 55, Iteration 14, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next 
State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 46, Action Source: Model Prediction\n", "Episode 55, Iteration 15, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 48, Action Source: Exploration\n", "Episode 55, Iteration 16, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 50, Action Source: Exploration\n", "Episode 55, Iteration 17, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 52, Action Source: Exploration\n", "Episode 55, Iteration 18, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 55, Iteration 19, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 55, Iteration 20, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 60, Action Source: Exploration\n", "Episode 55, Iteration 21, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 55, Iteration 22, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 68, Action Source: Model Prediction\n", "Episode 55, Iteration 23, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 55, Iteration 24, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 55, Iteration 25, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 80, Action Source: Exploitation\n", "Episode 55, Iteration 26, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 84, Action Source: Model Prediction\n", "Episode 55, Iteration 27, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 88, Action Source: Model Prediction\n", "Episode 55, Iteration 28, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 55, Iteration 29, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 55, Iteration 30, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 100, Action Source: Exploitation\n", "Episode 55, Iteration 31, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative 
Score: 104, Action Source: Model Prediction\n", "Episode 55, Iteration 32, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 108, Action Source: Exploitation\n", "Episode 55, Iteration 33, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 110, Action Source: Exploitation\n", "Episode 55, Iteration 34, State: (4.0, 14.0, 125.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 88.0), Reward: 1, , Cumulative Score: 111, Action Source: Exploration\n", "Episode 55, Iteration 35, State: (5.0, 15.0, 130.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 89.0), Reward: 2, , Cumulative Score: 113, Action Source: Exploration\n", "Episode 55, Iteration 36, State: (5.0, 16.0, 135.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 55, Iteration 37, State: (4.0, 15.0, 130.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 130.0, 88.0), Reward: 3, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 55, Iteration 38, State: (4.0, 15.0, 130.0, 88.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 125.0, 87.0), Reward: 4, , Cumulative Score: 123, Action Source: Exploration\n", "Episode 55, Iteration 39, State: (3.0, 14.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 125.0, 87.0), Reward: 4, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 55, Iteration 40, State: (3.0, 14.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 131, Action Source: Exploration\n", "Episode 55, Iteration 41, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 135, Action Source: Model Prediction\n", "Episode 55, Iteration 42, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 139, Action Source: Exploitation\n", "Episode 55, Iteration 43, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 143, Action Source: Exploitation\n", "Episode 55, Iteration 44, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 147, Action Source: Exploitation\n", "Episode 55, Iteration 45, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 151, Action Source: Exploitation\n", "Episode 55, Iteration 46, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 155, Action Source: Exploitation\n", "Episode 55, Iteration 47, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 159, Action Source: Model Prediction\n", "Episode 55, Iteration 48, State: (3.0, 13.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 162, Action Source: Exploration\n", "Episode 55, Iteration 49, 
State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 165, Action Source: Model Prediction\n", "Episode 55, Iteration 50, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 168, Action Source: Model Prediction\n", "Episode 55, Iteration 51, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 171, Action Source: Model Prediction\n", "Episode 55, Iteration 52, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 174, Action Source: Model Prediction\n", "Episode 55, Iteration 53, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 177, Action Source: Exploration\n", "Episode 55, Iteration 54, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 85.0), Reward: 3, , Cumulative Score: 180, Action Source: Model Prediction\n", "Episode 55, Iteration 55, State: (3.0, 12.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 182, Action Source: Exploration\n", "Episode 55, Iteration 56, State: (3.0, 11.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 184, Action Source: Exploration\n", "Episode 55, Iteration 57, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 186, Action Source: Model Prediction\n", "Episode 55, Iteration 58, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 188, Action Source: Model Prediction\n", "Episode 55, Iteration 59, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 190, Action Source: Model Prediction\n", "Episode 55, Iteration 60, State: (3.0, 10.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 192, Action Source: Model Prediction\n", "Episode: 55 Best Action: 0 Best evaluation action: 2\n", "Episode: 55 Score: 192 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0 Best Action Source: Exploration\n", "Episode 56, Iteration 1, State: (5.0, 16.0, 122.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2, Action Source: Exploration\n", "Episode 56, Iteration 2, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4, Action Source: Model Prediction\n", "Episode 56, Iteration 3, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 56, Iteration 4, State: (5.0, 16.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 9, Action Source: Exploration\n", "Episode 56, Iteration 5, State: (4.0, 15.0, 
120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 56, Iteration 6, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 56, Iteration 7, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 18, Action Source: Model Prediction\n", "Episode 56, Iteration 8, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 56, Iteration 9, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 56, Iteration 10, State: (4.0, 15.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 26, Action Source: Exploration\n", "Episode 56, Iteration 11, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 28, Action Source: Model Prediction\n", "Episode 56, Iteration 12, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 30, Action Source: Model Prediction\n", "Episode 56, Iteration 13, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 56, Iteration 14, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 34, Action Source: Model Prediction\n", "Episode 56, Iteration 15, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 36, Action Source: Model Prediction\n", "Episode 56, Iteration 16, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 56, Iteration 17, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 56, Iteration 18, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 56, Iteration 19, State: (5.0, 16.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 45, Action Source: Exploration\n", "Episode 56, Iteration 20, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 48, Action Source: Exploration\n", "Episode 56, Iteration 21, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 56, Iteration 22, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, 
Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 56, Iteration 23, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 57, Action Source: Model Prediction\n", "Episode 56, Iteration 24, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 56, Iteration 25, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 56, Iteration 26, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 66, Action Source: Model Prediction\n", "Episode 56, Iteration 27, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 56, Iteration 28, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 72, Action Source: Model Prediction\n", "Episode 56, Iteration 29, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 56, Iteration 30, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 78, Action Source: Exploration\n", "Episode 56, Iteration 31, State: (5.0, 18.0, 135.0, 91.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 81, Action Source: Exploration\n", "Episode 56, Iteration 32, State: (5.0, 18.0, 135.0, 91.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 83, Action Source: Exploration\n", "Episode 56, Iteration 33, State: (4.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 56, Iteration 34, State: (4.0, 17.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 56, Iteration 35, State: (3.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 56, Iteration 36, State: (3.0, 16.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 92, Action Source: Exploration\n", "Episode 56, Iteration 37, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 95, Action Source: Exploration\n", "Episode 56, Iteration 38, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 56, Iteration 39, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative 
Score: 101, Action Source: Model Prediction\n", "Episode 56, Iteration 40, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 56, Iteration 41, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 56, Iteration 42, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 56, Iteration 43, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 56, Iteration 44, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 56, Iteration 45, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 56, Iteration 46, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 56, Iteration 47, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 56, Iteration 48, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 56, Iteration 49, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 56, Iteration 50, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 56, Iteration 51, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 56, Iteration 52, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 140, Action Source: Model Prediction\n", "Episode 56, Iteration 53, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 143, Action Source: Exploration\n", "Episode 56, Iteration 54, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 146, Action Source: Model Prediction\n", "Episode 56, Iteration 55, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 149, Action Source: Exploration\n", "Episode 56, Iteration 56, State: (3.0, 15.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 152, Action Source: 
Exploration\n", "Episode 56, Iteration 57, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 155, Action Source: Model Prediction\n", "Episode 56, Iteration 58, State: (4.0, 16.0, 125.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 158, Action Source: Exploration\n", "Episode 56, Iteration 59, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 17.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 161, Action Source: Model Prediction\n", "Episode 56, Iteration 60, State: (5.0, 17.0, 130.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 164, Action Source: Exploration\n", "Episode: 56 Best Action: 0 Best evaluation action: 2\n", "Episode: 56 Score: 164 Best Reward: 3 Gsize State: 4.0 Bratio State: 1:15.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 57, Iteration 1, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 57, Iteration 2, State: (4.0, 14.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 7, Action Source: Exploration\n", "Episode 57, Iteration 3, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 11, Action Source: Model Prediction\n", "Episode 57, Iteration 4, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 15, Action Source: Model Prediction\n", "Episode 57, Iteration 5, State: (3.0, 13.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 19, Action Source: Exploration\n", "Episode 57, Iteration 6, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 23, Action Source: Model Prediction\n", "Episode 57, Iteration 7, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 27, Action Source: Model Prediction\n", "Episode 57, Iteration 8, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 31, Action Source: Model Prediction\n", "Episode 57, Iteration 9, State: (3.0, 12.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 34, Action Source: Exploration\n", "Episode 57, Iteration 10, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 37, Action Source: Exploration\n", "Episode 57, Iteration 11, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 40, Action Source: Exploration\n", "Episode 57, Iteration 12, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 43, Action Source: Model Prediction\n", "Episode 57, Iteration 
13, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 46, Action Source: Exploration\n", "Episode 57, Iteration 14, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 49, Action Source: Model Prediction\n", "Episode 57, Iteration 15, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 52, Action Source: Model Prediction\n", "Episode 57, Iteration 16, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 57, Iteration 17, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 58, Action Source: Model Prediction\n", "Episode 57, Iteration 18, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 61, Action Source: Exploitation\n", "Episode 57, Iteration 19, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 57, Iteration 20, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 57, Iteration 21, State: (3.0, 11.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 57, Iteration 22, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 57, Iteration 23, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 73, Action Source: Model Prediction\n", "Episode 57, Iteration 24, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 75, Action Source: Model Prediction\n", "Episode 57, Iteration 25, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 57, Iteration 26, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 57, Iteration 27, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 57, Iteration 28, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 57, Iteration 29, State: (3.0, 10.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 85, Action Source: Exploration\n", "Episode 57, Iteration 30, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 1, 
Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 87, Action Source: Model Prediction\n", "Episode 57, Iteration 31, State: (3.0, 9.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 89, Action Source: Exploration\n", "Episode 57, Iteration 32, State: (3.0, 8.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 91, Action Source: Exploration\n", "Episode 57, Iteration 33, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 92, Action Source: Model Prediction\n", "Episode 57, Iteration 34, State: (4.0, 8.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 93, Action Source: Model Prediction\n", "Episode 57, Iteration 35, State: (4.0, 8.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 94, Action Source: Exploration\n", "Episode 57, Iteration 36, State: (4.0, 8.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 57, Iteration 37, State: (4.0, 8.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 96, Action Source: Model Prediction\n", "Episode 57, Iteration 38, State: (4.0, 8.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 97, Action Source: Model Prediction\n", "Episode 57, Iteration 39, State: (4.0, 8.0, 125.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 98, Action Source: Model Prediction\n", "Episode 57, Iteration 40, State: (4.0, 8.0, 125.0, 83.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 9.0, 130.0, 84.0), Reward: 1, , Cumulative Score: 99, Action Source: Exploration\n", "Episode 57, Iteration 41, State: (5.0, 9.0, 130.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 83.0), Reward: 1, , Cumulative Score: 100, Action Source: Exploration\n", "Episode 57, Iteration 42, State: (4.0, 8.0, 125.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 57, Iteration 43, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 104, Action Source: Model Prediction\n", "Episode 57, Iteration 44, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 106, Action Source: Model Prediction\n", "Episode 57, Iteration 45, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 57, Iteration 46, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 110, Action Source: Exploration\n", "Episode 57, Iteration 47, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 112, 
Action Source: Model Prediction\n", "Episode 57, Iteration 48, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 114, Action Source: Exploration\n", "Episode 57, Iteration 49, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 57, Iteration 50, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 57, Iteration 51, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 57, Iteration 52, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 57, Iteration 53, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 57, Iteration 54, State: (3.0, 7.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 126, Action Source: Exploration\n", "Episode 57, Iteration 55, State: (3.0, 6.0, 120.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 128, Action Source: Exploration\n", "Episode 57, Iteration 56, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 57, Iteration 57, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 132, Action Source: Exploration\n", "Episode 57, Iteration 58, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 57, Iteration 59, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 136, Action Source: Model Prediction\n", "Episode 57, Iteration 60, State: (3.0, 5.0, 120.0, 80.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 5.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 138, Action Source: Exploration\n", "Episode: 57 Best Action: 0 Best evaluation action: 2\n", "Episode: 57 Score: 138 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 58, Iteration 1, State: (4.0, 14.0, 121.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 58, Iteration 2, State: (4.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 58, Iteration 3, State: (4.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 9, Action Source: Model Prediction\n", "Episode 58, 
Iteration 4, State: (4.0, 14.0, 120.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 58, Iteration 5, State: (4.0, 14.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 15, Action Source: Exploration\n", "Episode 58, Iteration 6, State: (3.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 18, Action Source: Exploration\n", "Episode 58, Iteration 7, State: (3.0, 13.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 21, Action Source: Model Prediction\n", "Episode 58, Iteration 8, State: (3.0, 13.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 25, Action Source: Exploration\n", "Episode 58, Iteration 9, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 29, Action Source: Model Prediction\n", "Episode 58, Iteration 10, State: (3.0, 12.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 32, Action Source: Exploration\n", "Episode 58, Iteration 11, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 35, Action Source: Model Prediction\n", "Episode 58, Iteration 12, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 38, Action Source: Model Prediction\n", "Episode 58, Iteration 13, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 40, Action Source: Exploration\n", "Episode 58, Iteration 14, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 42, Action Source: Model Prediction\n", "Episode 58, Iteration 15, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 58, Iteration 16, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 46, Action Source: Model Prediction\n", "Episode 58, Iteration 17, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 58, Iteration 18, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 50, Action Source: Model Prediction\n", "Episode 58, Iteration 19, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 52, Action Source: Model Prediction\n", "Episode 58, Iteration 20, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 54, Action Source: Model Prediction\n", "Episode 58, Iteration 21, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, 
Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 56, Action Source: Model Prediction\n", "Episode 58, Iteration 22, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 58, Action Source: Exploration\n", "Episode 58, Iteration 23, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 12.0, 125.0, 88.0), Reward: 2, , Cumulative Score: 60, Action Source: Model Prediction\n", "Episode 58, Iteration 24, State: (4.0, 12.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 61, Action Source: Exploration\n", "Episode 58, Iteration 25, State: (5.0, 13.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 62, Action Source: Model Prediction\n", "Episode 58, Iteration 26, State: (5.0, 13.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 58, Iteration 27, State: (5.0, 13.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 64, Action Source: Exploration\n", "Episode 58, Iteration 28, State: (5.0, 13.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (5.0, 13.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 65, Action Source: Model Prediction\n", "Episode 58, Iteration 29, State: (5.0, 13.0, 130.0, 89.0), Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 67, Action Source: Exploration\n", "Episode 58, Iteration 30, State: (5.0, 14.0, 135.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 69, Action Source: Exploration\n", "Episode 58, Iteration 31, State: (4.0, 13.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 73, Action Source: Exploration\n", "Episode 58, Iteration 32, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 77, Action Source: Model Prediction\n", "Episode 58, Iteration 33, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 81, Action Source: Model Prediction\n", "Episode 58, Iteration 34, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 85, Action Source: Model Prediction\n", "Episode 58, Iteration 35, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 89, Action Source: Model Prediction\n", "Episode 58, Iteration 36, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 91, Action Source: Exploration\n", "Episode 58, Iteration 37, State: (4.0, 13.0, 130.0, 89.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 93, Action Source: Exploration\n", "Episode 58, Iteration 38, State: (4.0, 13.0, 130.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , 
Cumulative Score: 97, Action Source: Exploration\n", "Episode 58, Iteration 39, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 101, Action Source: Model Prediction\n", "Episode 58, Iteration 40, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 105, Action Source: Exploitation\n", "Episode 58, Iteration 41, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 58, Iteration 42, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 125.0, 88.0), Reward: 4, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 58, Iteration 43, State: (3.0, 12.0, 125.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 116, Action Source: Exploitation\n", "Episode 58, Iteration 44, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 58, Iteration 45, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 58, Iteration 46, State: (3.0, 11.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 125, Action Source: Exploration\n", "Episode 58, Iteration 47, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 58, Iteration 48, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 58, Iteration 49, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 58, Iteration 50, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 58, Iteration 51, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 86.0), Reward: 3, , Cumulative Score: 140, Action Source: Model Prediction\n", "Episode 58, Iteration 52, State: (3.0, 10.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 142, Action Source: Exploration\n", "Episode 58, Iteration 53, State: (3.0, 9.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 144, Action Source: Model Prediction\n", "Episode 58, Iteration 54, State: (3.0, 9.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 146, Action Source: Model Prediction\n", "Episode 58, Iteration 55, State: (3.0, 9.0, 120.0, 85.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 147, Action Source: 
Exploration\n", "Episode 58, Iteration 56, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 148, Action Source: Model Prediction\n", "Episode 58, Iteration 57, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode 58, Iteration 58, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 150, Action Source: Model Prediction\n", "Episode 58, Iteration 59, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 10.0, 125.0, 86.0), Reward: 1, , Cumulative Score: 151, Action Source: Model Prediction\n", "Episode 58, Iteration 60, State: (4.0, 10.0, 125.0, 86.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 153, Action Source: Model Prediction\n", "Episode: 58 Best Action: 0 Best evaluation action: 0\n", "Episode: 58 Score: 153 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 59, Iteration 1, State: (3.0, 15.0, 118.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 4, Action Source: Exploration\n", "Episode 59, Iteration 2, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 8, Action Source: Model Prediction\n", "Episode 59, Iteration 3, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 59, Iteration 4, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 16, Action Source: Model Prediction\n", "Episode 59, Iteration 5, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 20, Action Source: Exploitation\n", "Episode 59, Iteration 6, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 59, Iteration 7, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 28, Action Source: Exploration\n", "Episode 59, Iteration 8, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 59, Iteration 9, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 36, Action Source: Exploitation\n", "Episode 59, Iteration 10, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 39, Action Source: Exploitation\n", "Episode 59, Iteration 11, State: (4.0, 15.0, 125.0, 89.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 43, Action Source: Exploration\n", "Episode 59, 
Iteration 12, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 47, Action Source: Exploration\n", "Episode 59, Iteration 13, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 51, Action Source: Model Prediction\n", "Episode 59, Iteration 14, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 55, Action Source: Model Prediction\n", "Episode 59, Iteration 15, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 59, Action Source: Exploitation\n", "Episode 59, Iteration 16, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 63, Action Source: Model Prediction\n", "Episode 59, Iteration 17, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 67, Action Source: Model Prediction\n", "Episode 59, Iteration 18, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 71, Action Source: Model Prediction\n", "Episode 59, Iteration 19, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 75, Action Source: Exploitation\n", "Episode 59, Iteration 20, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 79, Action Source: Model Prediction\n", "Episode 59, Iteration 21, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 83, Action Source: Model Prediction\n", "Episode 59, Iteration 22, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 87, Action Source: Exploitation\n", "Episode 59, Iteration 23, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 91, Action Source: Model Prediction\n", "Episode 59, Iteration 24, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 95, Action Source: Model Prediction\n", "Episode 59, Iteration 25, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 99, Action Source: Model Prediction\n", "Episode 59, Iteration 26, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 103, Action Source: Model Prediction\n", "Episode 59, Iteration 27, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 107, Action Source: Model Prediction\n", "Episode 59, Iteration 28, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 108, Action Source: Exploration\n", "Episode 59, Iteration 29, State: (4.0, 13.0, 125.0, 87.0), Agent 
Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 109, Action Source: Model Prediction\n", "Episode 59, Iteration 30, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 59, Iteration 31, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 111, Action Source: Model Prediction\n", "Episode 59, Iteration 32, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 59, Iteration 33, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 113, Action Source: Model Prediction\n", "Episode 59, Iteration 34, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 59, Iteration 35, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 115, Action Source: Model Prediction\n", "Episode 59, Iteration 36, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 116, Action Source: Model Prediction\n", "Episode 59, Iteration 37, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 117, Action Source: Model Prediction\n", "Episode 59, Iteration 38, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 118, Action Source: Model Prediction\n", "Episode 59, Iteration 39, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 119, Action Source: Model Prediction\n", "Episode 59, Iteration 40, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 59, Iteration 41, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 121, Action Source: Model Prediction\n", "Episode 59, Iteration 42, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 59, Iteration 43, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 123, Action Source: Model Prediction\n", "Episode 59, Iteration 44, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 59, Iteration 45, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 125, Action Source: Model Prediction\n", "Episode 59, Iteration 46, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, 
Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 59, Iteration 47, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 127, Action Source: Model Prediction\n", "Episode 59, Iteration 48, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 59, Iteration 49, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 129, Action Source: Model Prediction\n", "Episode 59, Iteration 50, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 59, Iteration 51, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 131, Action Source: Model Prediction\n", "Episode 59, Iteration 52, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode 59, Iteration 53, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 133, Action Source: Model Prediction\n", "Episode 59, Iteration 54, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 59, Iteration 55, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 135, Action Source: Model Prediction\n", "Episode 59, Iteration 56, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 139, Action Source: Exploration\n", "Episode 59, Iteration 57, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 143, Action Source: Exploitation\n", "Episode 59, Iteration 58, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 147, Action Source: Model Prediction\n", "Episode 59, Iteration 59, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 148, Action Source: Exploitation\n", "Episode 59, Iteration 60, State: (4.0, 13.0, 125.0, 87.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 87.0), Reward: 1, , Cumulative Score: 149, Action Source: Model Prediction\n", "Episode: 59 Best Action: 0 Best evaluation action: 0\n", "Episode: 59 Score: 149 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n", "Episode 60, Iteration 1, State: (4.0, 16.0, 117.0, 90.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3, Action Source: Model Prediction\n", "Episode 60, Iteration 2, State: (4.0, 16.0, 120.0, 90.0), Agent Action: 1, 
Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 6, Action Source: Model Prediction\n", "Episode 60, Iteration 3, State: (4.0, 16.0, 120.0, 90.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 8, Action Source: Exploration\n", "Episode 60, Iteration 4, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 10, Action Source: Model Prediction\n", "Episode 60, Iteration 5, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 12, Action Source: Model Prediction\n", "Episode 60, Iteration 6, State: (3.0, 15.0, 120.0, 89.0), Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 16, Action Source: Exploration\n", "Episode 60, Iteration 7, State: (3.0, 14.0, 120.0, 88.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 20, Action Source: Exploration\n", "Episode 60, Iteration 8, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 24, Action Source: Model Prediction\n", "Episode 60, Iteration 9, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 28, Action Source: Exploitation\n", "Episode 60, Iteration 10, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 13.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 32, Action Source: Model Prediction\n", "Episode 60, Iteration 11, State: (3.0, 13.0, 120.0, 87.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 36, Action Source: Exploitation\n", "Episode 60, Iteration 12, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 40, Action Source: Model Prediction\n", "Episode 60, Iteration 13, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 44, Action Source: Model Prediction\n", "Episode 60, Iteration 14, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 48, Action Source: Model Prediction\n", "Episode 60, Iteration 15, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 52, Action Source: Model Prediction\n", "Episode 60, Iteration 16, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 56, Action Source: Exploitation\n", "Episode 60, Iteration 17, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 60, Action Source: Exploitation\n", "Episode 60, Iteration 18, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 64, Action Source: Model Prediction\n", "Episode 60, Iteration 19, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , 
Cumulative Score: 68, Action Source: Exploration\n", "Episode 60, Iteration 20, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 72, Action Source: Exploitation\n", "Episode 60, Iteration 21, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 76, Action Source: Model Prediction\n", "Episode 60, Iteration 22, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 80, Action Source: Exploitation\n", "Episode 60, Iteration 23, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 84, Action Source: Exploitation\n", "Episode 60, Iteration 24, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 88, Action Source: Exploitation\n", "Episode 60, Iteration 25, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 92, Action Source: Exploitation\n", "Episode 60, Iteration 26, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 1, Evaluation Action 1, Next State: (3.0, 12.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 96, Action Source: Exploitation\n", "Episode 60, Iteration 27, State: (3.0, 12.0, 120.0, 86.0), Agent Action: 0, Evaluation Action 1, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 98, Action Source: Exploitation\n", "Episode 60, Iteration 28, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 11.0, 120.0, 85.0), Reward: 2, , Cumulative Score: 100, Action Source: Model Prediction\n", "Episode 60, Iteration 29, State: (3.0, 11.0, 120.0, 85.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 102, Action Source: Exploration\n", "Episode 60, Iteration 30, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 104, Action Source: Exploration\n", "Episode 60, Iteration 31, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 106, Action Source: Exploration\n", "Episode 60, Iteration 32, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 108, Action Source: Model Prediction\n", "Episode 60, Iteration 33, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 110, Action Source: Model Prediction\n", "Episode 60, Iteration 34, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 112, Action Source: Model Prediction\n", "Episode 60, Iteration 35, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 114, Action Source: Model Prediction\n", "Episode 60, Iteration 36, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 10.0, 120.0, 84.0), Reward: 2, , Cumulative Score: 116, Action Source: Exploration\n", "Episode 60, 
Iteration 37, State: (3.0, 10.0, 120.0, 84.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 118, Action Source: Exploration\n", "Episode 60, Iteration 38, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 120, Action Source: Model Prediction\n", "Episode 60, Iteration 39, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 122, Action Source: Model Prediction\n", "Episode 60, Iteration 40, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 124, Action Source: Model Prediction\n", "Episode 60, Iteration 41, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 9.0, 120.0, 83.0), Reward: 2, , Cumulative Score: 126, Action Source: Model Prediction\n", "Episode 60, Iteration 42, State: (3.0, 9.0, 120.0, 83.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 8.0, 120.0, 82.0), Reward: 2, , Cumulative Score: 128, Action Source: Model Prediction\n", "Episode 60, Iteration 43, State: (3.0, 8.0, 120.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 130, Action Source: Model Prediction\n", "Episode 60, Iteration 44, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 132, Action Source: Model Prediction\n", "Episode 60, Iteration 45, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 2, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 133, Action Source: Exploration\n", "Episode 60, Iteration 46, State: (4.0, 8.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 134, Action Source: Model Prediction\n", "Episode 60, Iteration 47, State: (4.0, 8.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 135, Action Source: Exploration\n", "Episode 60, Iteration 48, State: (4.0, 8.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 136, Action Source: Exploration\n", "Episode 60, Iteration 49, State: (4.0, 8.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 137, Action Source: Model Prediction\n", "Episode 60, Iteration 50, State: (4.0, 8.0, 125.0, 82.0), Agent Action: 1, Evaluation Action 2, Next State: (4.0, 8.0, 125.0, 82.0), Reward: 1, , Cumulative Score: 138, Action Source: Model Prediction\n", "Episode 60, Iteration 51, State: (4.0, 8.0, 125.0, 82.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 140, Action Source: Exploration\n", "Episode 60, Iteration 52, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 142, Action Source: Model Prediction\n", "Episode 60, Iteration 53, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 144, Action Source: Model Prediction\n", "Episode 60, Iteration 54, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 1, Evaluation 
Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 146, Action Source: Model Prediction\n", "Episode 60, Iteration 55, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 148, Action Source: Model Prediction\n", "Episode 60, Iteration 56, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 150, Action Source: Model Prediction\n", "Episode 60, Iteration 57, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 152, Action Source: Model Prediction\n", "Episode 60, Iteration 58, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 154, Action Source: Model Prediction\n", "Episode 60, Iteration 59, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 1, Evaluation Action 2, Next State: (3.0, 7.0, 120.0, 81.0), Reward: 2, , Cumulative Score: 156, Action Source: Model Prediction\n", "Episode 60, Iteration 60, State: (3.0, 7.0, 120.0, 81.0), Agent Action: 0, Evaluation Action 2, Next State: (3.0, 6.0, 120.0, 80.0), Reward: 2, , Cumulative Score: 158, Action Source: Model Prediction\n", "Episode: 60 Best Action: 0 Best evaluation action: 0\n", "Episode: 60 Score: 158 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0 Best Action Source: Exploration\n" ] } ], "source": [ "# 3 action sources (add the ReplayBuffer to action selection, take out the old exploitation step)\n", "\n", "# Iterate, combining the 3 action sources in every episode\n", "# Instantiate the environment\n", "#env = PouroverEnv()\n", "#buffer = ReplayBuffer(buffer_size=2000)\n", "\n", "# Training Loop\n", "episodes = 60\n", "y_true_rl = []\n", "y_pred_rl = []\n", "rl_scores = []\n", "rl_rewards = []\n", "total_timesteps_rl = []\n", "training_results_rl = []\n", "count_rewards_rl = {}\n", "count_rewards_action_sources = {}\n", "total_actions = 0\n", "\n", "epsilon = 0.5  # Exploration rate\n", "epsilon_decay = 0.99  # Decay rate for exploration\n", "min_epsilon = 0.01  # Minimum exploration rate\n", "\n", "count_action_sources = {\"Exploitation\": 0, \"Exploration\": 0, \"Model Prediction\": 0}\n", "\n", "for episode in range(1, episodes + 1):\n", "    state = env.reset()\n", "    state = tuple(state)  # Convert state to a hashable type\n", "    done = False\n", "    score = 0\n", "\n", "    best_episode = None\n", "    best_reward = -float('inf')\n", "    best_score = 0\n", "    best_action = None\n", "    evaluation_best_action = None\n", "\n", "    iteration = 0\n", "    while not done:\n", "        iteration += 1\n", "        env.render()\n", "\n", "        exploration_action = None\n", "        predicted_action = None\n", "        replay_action = None\n", "        action_source = None\n", "\n", "        # Check for exploration\n", "        if action_source is None and np.random.rand() < epsilon:\n", "            exploration_action = env.action_space.sample()\n", "            action_source = \"Exploration\"\n", "\n", "        # Check the replay buffer\n", "        replay_experience = buffer.sample(1)\n", "        if replay_experience:\n", "            replay_state, replay_action, _, _, _ = replay_experience[0]\n", "            if replay_state == state:\n", "                action = replay_action\n", "                action_source = \"Exploitation\"\n", "\n", "        # Fall back to the model prediction\n", "        if action_source is None:\n", "            predicted_action, _ = model.predict(state)\n", "            predicted_action = int(predicted_action)\n",
action_source = \"Model Prediction\"\n", "\n", "\n", " # Choose the action based on source\n", " if exploration_action is not None:\n", " action = exploration_action\n", " elif action_source == \"Exploitation\":\n", " action = replay_action\n", " else:\n", " action = predicted_action\n", "\n", "\n", " # Increment the count for the action source\n", " if action_source in count_action_sources:\n", " count_action_sources[action_source] += 1\n", " else:\n", " print(f\"Unknown action source: {action_source}\")\n", " total_actions += 1\n", "\n", " # Execute the action in the environment\n", " evaluation_action = env.get_evaluation_action(state)\n", "\n", " while (state, action) in buffer.negative_experiences:\n", " action = env.action_space.sample()\n", "\n", " next_state, reward, done, info = env.step(action)\n", " next_state = tuple(next_state) # Convert next_state to a hashable type\n", " score += reward\n", "\n", " buffer.add((state, action, reward, next_state, done))\n", "\n", " if reward == 4:\n", " for _ in range(100):\n", " buffer.add((state, action, reward, next_state, done))\n", "\n", "\n", " # Print state awal, action, next state, and reward\n", " #if action != evaluation_action and reward ==5:\n", " print(f'Episode {episode}, Iteration {iteration}, State: {state}, Agent Action: {action}, Evaluation Action {evaluation_action}, Next State: {next_state}, Reward: {reward}, , Cumulative Score: {score}, Action Source: {action_source}')\n", "\n", " state = next_state\n", "\n", " # Track the best reward in every episode\n", " if reward > best_reward:\n", " best_reward = reward\n", " best_episode = (state, action, reward, next_state, done, score, action_source)\n", " best_action = action\n", " evaluation_best_action = evaluation_action\n", "\n", "\n", " # Print iteration details\n", " # print(f'Episode: {episode}, Iteration: {iteration}, State: {state}, Action: {action}, evaluation action: {evaluation_action}, Reward: {reward}, Cumulative Score: {score}, Action Source: {action_source}')\n", "\n", " # Print the best episode found in this iteration\n", " if best_episode:\n", " best_state, best_action, best_reward, best_next_state, best_done, best_score, best_action_source = best_episode\n", " gsize_state, bratio_state, btime_state, temperature_state = best_state\n", "\n", " rl_scores.append(score)\n", " rl_rewards.append(best_reward)\n", " total_timesteps_rl.append(iteration)\n", "\n", " # Convert btime_state from seconds to minutes and seconds\n", " minutes = int(btime_state // 60)\n", " seconds = int(btime_state % 60)\n", "\n", " # Store training results in the list\n", " training_results_rl.append({\n", " 'Episode': episode,\n", " 'Score': score,\n", " 'Best Reward': best_reward,\n", " 'Gsize State': gsize_state,\n", " 'Bratio State': bratio_state,\n", " 'Btime State (sec)': btime_state,\n", " 'Btime State (min:sec)': f'{minutes} minutes {seconds} seconds',\n", " 'Temperature State': temperature_state,\n", " 'Best Action Source': best_action_source\n", " })\n", " \n", " #if best_reward == 5:\n", " #if best_action != evaluation_best_action and best_reward ==5:\n", " print(f'Episode: {episode} Best Action: {best_action} Best evaluation action: {evaluation_best_action}')\n", " print(f'Episode: {episode} Score: {score} Best Reward: {best_reward} Gsize State: {gsize_state} Bratio State: 1:{bratio_state} Btime State: {btime_state} convert: {minutes} minutes {seconds} seconds Temperature State: {temperature_state} Best Action Source: {best_action_source}')\n", "\n", " # Append the best action and 
"        # Append the best action and its corresponding evaluation action\n", "        if best_action is not None and evaluation_best_action is not None:\n", "            # If the agent's best action differs from the evaluation action but still earned the top reward,\n", "            # treat the agent's action as the reference label\n", "            if best_action != evaluation_best_action and best_reward == 4:\n", "                y_true_rl.append(best_action)\n", "                y_pred_rl.append(best_action)\n", "            else:\n", "                y_true_rl.append(evaluation_best_action)\n", "                y_pred_rl.append(best_action)\n", "\n", "        # Count the total occurrences of each best reward\n", "        if best_reward in count_rewards_rl:\n", "            count_rewards_rl[best_reward] += 1\n", "        else:\n", "            count_rewards_rl[best_reward] = 1\n", "\n", "        # Count best rewards broken down by action source\n", "        if best_reward not in count_rewards_action_sources:\n", "            count_rewards_action_sources[best_reward] = {\"Exploration\": 0, \"Exploitation\": 0, \"Model Prediction\": 0}\n", "        count_rewards_action_sources[best_reward][action_source] += 1\n", "\n", "    # Decay epsilon\n", "    if epsilon > min_epsilon:\n", "        epsilon *= epsilon_decay\n", "\n", "env.close()\n" ] }, { "cell_type": "code", "execution_count": 1042, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Number of entries in the replay buffer: 2000\n" ] } ], "source": [ "buffer_size = len(buffer)\n", "print(f\"Number of entries in the replay buffer: {buffer_size}\")" ] }, { "cell_type": "code", "execution_count": 1043, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Training results successfully exported to 'training_results_rl.xlsx'.\n" ] } ], "source": [ "# Convert the list of dictionaries to a DataFrame\n", "df = pd.DataFrame(training_results_rl)\n", "\n", "# Save the DataFrame to an Excel file\n", "df.to_excel('training_results_rl.xlsx', index=False)\n", "\n", "print(\"Training results successfully exported to 'training_results_rl.xlsx'.\")" ] }, { "cell_type": "code", "execution_count": 1044, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Successfully exported to 'all_data_replay_buffer.xlsx'.\n" ] } ], "source": [ "# Pull experiences from the replay buffer by repeated sampling\n", "all_experiences = []\n", "for _ in range(len(buffer)):\n", "    experience = buffer.sample(1)\n", "    state, action, reward, next_state, done = experience[0]\n", "    all_experiences.append((state, action, reward, next_state, done))\n", "\n", "# Convert to a DataFrame\n", "df = pd.DataFrame(all_experiences, columns=['State', 'Action', 'Reward', 'Next_State', 'Done'])\n", "\n", "# Save to Excel\n", "df.to_excel('all_data_replay_buffer.xlsx', index=False)\n", "\n", "print(\"Successfully exported to 'all_data_replay_buffer.xlsx'.\")\n" ] }, { "cell_type": "code", "execution_count": 1033, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Reward 3: 6 times\n", "Reward 4: 54 times\n" ] } ], "source": [ "# Print the total counts of each reward\n", "for reward, count in sorted(count_rewards_rl.items()):\n", "    print(f'Reward {reward}: {count} times')" ] }, { "cell_type": "code", "execution_count": 1040, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "SM5upVW97i-A", "outputId": "7c55cff8-0a4d-4993-bb29-d9420a4f8af7" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "Total Reward 3: 6 times\n", " Exploration: 4 times\n", " Exploitation: 0 times\n", " Model Prediction: 2 times\n", "\n", "Total Reward 4: 54 times\n", " Exploration: 17 times\n", " Exploitation: 1 times\n", " Model Prediction: 36 times\n" ] } ], "source": [ "# Print the total counts for every reward, broken down by action source\n", "for best_reward, count in sorted(count_rewards_rl.items()):\n",
"    print(f'\\nTotal Reward {best_reward}: {count} times')\n", "    for source, source_count in count_rewards_action_sources[best_reward].items():\n", "        print(f' {source}: {source_count} times')" ] }, { "cell_type": "code", "execution_count": 1039, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "PLamsLYSH6b7", "outputId": "d6a52a24-9b4e-49ff-8819-8275bd40f040" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Exploitation: 4.19%\n", "Exploration: 36.33%\n", "Model Prediction: 59.47%\n" ] } ], "source": [ "# Share of actions taken from each source\n", "for source, count in count_action_sources.items():\n", "    percentage = (count / total_actions) * 100\n", "    print(f\"{source}: {percentage:.2f}%\")" ] }, { "cell_type": "code", "execution_count": 1041, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Confusion Matrix:\n", "[[54 0 1]\n", " [ 0 1 0]\n", " [ 1 1 2]]\n", "Accuracy: 95.00%\n", "Precision: 95.28%\n", "Recall: 95.00%\n", "F1 Score: 94.92%\n" ] } ], "source": [ "def evaluate_model(y_true, y_pred):\n", "    cm = confusion_matrix(y_true, y_pred)\n", "    accuracy = accuracy_score(y_true, y_pred)\n", "    # 'weighted' averages per-class scores by class support; use 'macro' for unweighted per-class averaging\n", "    precision = precision_score(y_true, y_pred, average='weighted')\n", "    recall = recall_score(y_true, y_pred, average='weighted')\n", "    f1 = f1_score(y_true, y_pred, average='weighted')\n", "\n", "    accuracy *= 100\n", "    precision *= 100\n", "    recall *= 100\n", "    f1 *= 100\n", "\n", "    # Extract TP, TN, FP, FN for the binary (2-class) case if needed\n", "    if cm.shape == (2, 2):\n", "        tn, fp, fn, tp = cm.ravel()\n", "        return cm, accuracy, precision, recall, f1, tp, tn, fp, fn\n", "    else:\n", "        return cm, accuracy, precision, recall, f1\n", "\n", "# Example usage:\n", "cm, accuracy, precision, recall, f1, *confusion_values = evaluate_model(y_true_rl, y_pred_rl)\n", "print(f'Confusion Matrix:\\n{cm}')\n", "print(f'Accuracy: {accuracy:.2f}%')\n", "print(f'Precision: {precision:.2f}%')\n", "print(f'Recall: {recall:.2f}%')\n", "print(f'F1 Score: {f1:.2f}%')\n", "\n", "# If you need to print confusion values for the 2-class case\n", "if len(confusion_values) == 4:\n", "    tp, tn, fp, fn = confusion_values\n", "    print(f'True Positives: {tp}')\n", "    print(f'True Negatives: {tn}')\n", "    print(f'False Positives: {fp}')\n", "    print(f'False Negatives: {fn}')\n" ] }, { "cell_type": "code", "execution_count": 1035, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 472 }, "id": "lo6XTGns8sEK", "outputId": "ba950894-660a-42c7-ffca-ee86307b7450" }, "outputs": [ { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjsAAAHHCAYAAABZbpmkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAC5EUlEQVR4nOydd7gU5dn/v7N9Ty+cwqGDKEUpIiL2QkRsUUkxwUQjb3w1YCIkJpL81DSDMaaoMRpjlCTqa4saS9QYC1gQFAEVFAHpcDhwet36/P7YfZ6Z7TO7M7Oze+7PdZ0Lzu6c2dnZ2Zl7vvf3vm+JMcZAEARBEARRpNjyvQEEQRAEQRBGQsEOQRAEQRBFDQU7BEEQBEEUNRTsEARBEARR1FCwQxAEQRBEUUPBDkEQBEEQRQ0FOwRBEARBFDUU7BAEQRAEUdRQsEMQBEEQRFFDwQ5BKBg9ejSuuOKKfG8GkYErrrgCo0ePzvrvX3rpJUybNg0ejweSJKGjo0O3bSMKnxUrVkCSJOzcuTPfm0LoBAU7hO7wE8X777+f700pOAYGBvD73/8es2bNQmVlJTweD4488kgsXrwYn332Wb43LytOP/10SJIkfrxeL6ZMmYI//OEPCIfDpm9Pa2srvvKVr8Dr9eLuu+/GP/7xD5SWlpq+HYXII488gj/84Q+qlx89ejTOP/984zaIIFTiyPcGEISV2LJlC2y2/NwDHD58GOeccw7WrVuH888/H1//+tdRVlaGLVu24NFHH8V9990Hv9+fl23LleHDh2P58uUAIu/zkUcewZIlS3Do0CHccsstmtf3l7/8JetA6b333kN3dzd+8YtfYM6cOVmtY7DyyCOP4OOPP8Z1112X700xlG984xu49NJL4Xa7870phE5QsEMULcFgEOFwGC6XS/Xf5PPkdsUVV2D9+vV48sknMX/+/JjnfvGLX+AnP/mJLq+TzX7JlcrKSlx22WXi96uvvhoTJkzAXXfdhZ///Oew2+2a1ud0OrPelpaWFgBAVVVV1uuIp7e3l9QhC6P187Hb7ZqPScLaUBqLyBv79u3DlVdeiYaGBrjdbkyePBkPPPBAzDJ+vx833XQTZsyYgcrKSpSWluKUU07B66+/HrPczp07IUkSbr/9dvzhD3/AuHHj4Ha7sXnzZvz0pz+FJEnYtm0brrjiClRVVaGyshLf+ta30NfXF7OeeM8OT8m9/fbbWLp0Kerq6lBaWoqLL74Yhw4divnbcDiMn/70p2hqakJJSQnOOOMMbN68WZUPaM2aNXjhhRewcOHChEAHiARht99+u/j99NNPx+mnn56wXLyXJdV+Wb9+PRwOB372s58lrGPLli2QJAl//OMfxWMdHR247rrrMGLECLjdbhxxxBH49a9/nbW64vF4MHPmTHR3d4vgg/PQQw9hxowZ8Hq9qKmpwaWXXoo9e/aofp/33XefeJ8zZ87Ee++9J5Y7/fTTcfnllwMAZs6cCUmSYj6bJ554Qrz2kCFDcNlll2Hfvn0Jr11WVobt27fj3HPPRXl5ORYsWAAgcgzccccdOOaYY+DxeFBXV4dzzjknIaWr5j2efvrpOProo/Hhhx/itNNOQ0lJCY444gg8+eSTAICVK1di1qxZ8Hq9OOqoo/Df//43YT+r+Y698cYbkCQJjz/+OG655RYMHz4cHo8HZ511FrZt2xazPS+88AJ27dolUpK5+Ka07o8333wTX/7ylzFy5Ei43W6MGDECS5YsQX9/f8xy6T4fSZKwePFiPPPMMzj66KPFPnnppZdi1pHMs8NTcm+99RaOP/54eDwejB07Fn//+98T3g//zLxeL4YPH45f/vKXePDBB8kHlEdI2SHywsGDB3HCCSeIk09dXR1efPFFLFy4EF1dXUIm7+rqwv3334+vfe1r+Pa3v43u7m789a9/xdy5c7F27VpMmzYtZr0PPvggBgYGcNVVV8HtdqOmpkY895WvfAVjxozB8uXL8cEHH+D+++9HfX09fv3rX2fc3muvvRbV1dW4+eabsXPnTvzhD3/A4sWL8dhjj4llli1bhttuuw0XXHAB5s6di40bN2Lu3LkYGBjIuP5nn30WQEQ+N4L4/TJ06FCcdtppePzxx3HzzTfHLPvYY4/Bbrfjy1/+MgCgr68Pp512Gvbt24f//d//xciRI/HOO+9g2bJlOHDggCYPhxIeoCgVlltuuQU33ngjvvKVr+B//ud/cOjQIdx111049dRTsX79+oxqzCOPPILu7m787//+LyRJwm233YZLLrkEn3/+OZxOJ37yk5/gqKOOwn333Yef//znGDNmDMaNGwcgcoH71re+hZkzZ2L58uU4ePAg7rjjDrz99tsJrx0MBjF37lycfPLJuP3221FSUgIAWLhwIVasWIF58+bhf/7nfxAMBvHmm2/i3XffxXHHHaf5Pba3t+P888/HpZdeii9/+cu45557cOmll+Lhhx/Gddddh6uvvhpf//rX8Zvf/AZf+tKXsGfPHpSXlwNQ/x3j3HrrrbDZbPjBD36Azs5O3HbbbViwYAHWrFkDAPjJT36Czs5O7N27F7///e8BAGVlZVo/9gTU7o8nnngCfX19uOaaa1BbW4u1a9firrvuwt69e/HEE0/ErDPV5wMAb731Fp566il85zvfQXl5Oe68807Mnz8fu3fvRm1tbdpt3bZtG770pS9h4cKFuPzyy/HAAw/giiuuwIwZMzB58mQAkQDzjDPOgCRJWLZsGUpLS3H//fdTSizfMILQmQcffJABYO+9917KZRYuXMiGDh3KDh8+HPP4pZdeyiorK1lfXx9jjLFgMMh8Pl/MMu3t7ayhoYFdeeWV4rEdO3YwAKyiooK1tLTELH/zzTczADHLM8bYxRdfzGpra2MeGzVqFLv88ssT3sucOXNYOBwWjy9ZsoTZ7XbW0dHBGGOsubmZORwOdtFFF8Ws76c//SkDELPOZFx88cUMAGtvb0+7HOe0005jp512WsLjl19+ORs1apT4Pd1++fOf/8wAsI8++ijm8UmTJrEzzzxT/P6LX/yClZaWss8++yxmuRtuuIHZ7Xa2e/fujNs6YcIEdujQIXbo0CH26aefsuuvv54BYOedd55YbufOncxut7Nbbrkl5u8/+ugj5nA4Yh5P9T5ra2tZW1ubePxf//oXA8Cee+458Viy49Pv97P6+np29NFHs/7+fvH4888/zwCwm266Kea1AbAbbrghZjtfe+01BoB997vfTdgH/NjR8h5PO+00BoA98sgj4rFPP/2UAWA2m429++674vGXX36ZAWAPPvigeEztd+z1119nANjEiRNjvmt33HFHwvFx3nnnxez3TIwaNSrmM45Hy/7g26tk+fLlTJIktmvXLvFYqs+HMcYAMJfLxbZt2yYe27hxIwPA7rrrLvEYP0Z27NgR814AsFWrVonHWlpamNvtZt///vfFY9deey2TJImtX79ePNba2spqamoS1kmYB6WxCNNhjOGf//wnLrjgAjDGcPjwYfEzd+5cdHZ24oMPPgAQyZ1zb0k4HEZbWxuCwSCOO+44sY
yS+fPno66uLunrXn311TG/n3LKKWhtbUVXV1fGbb7qqqsgSVLM34ZCIezatQsA8OqrryIYDOI73/lOzN9de+21GdcNQGwDvyvXm2T75ZJLLoHD4YhRpz7++GNs3rwZX/3qV8VjTzzxBE455RRUV1fHfFZz5sxBKBTCqlWrMr7+p59+irq6OtTV1WHChAn4zW9+gwsvvBArVqwQyzz11FMIh8P4yle+EvM6jY2NGD9+fELqMhlf/epXUV1dLX4/5ZRTAACff/552r97//330dLSgu985zvweDzi8fPOOw8TJkzACy+8kPA311xzTczv//znPyFJUoJSBkAcO1rfY1lZGS699FLx+1FHHYWqqipMnDgRs2bNEo/z//P3qeU7xvnWt74V4+NSu+9yQcv+8Hq94v+9vb04fPgwTjzxRDDGsH79+oR1x38+nDlz5gg1DwCmTJmCiooKVe9z0qRJYr8AQF1dHY466qiYv33ppZcwe/bsGNW5pqZGpNKI/EBpLMJ0Dh06hI6ODtx333247777ki6j9HH87W9/w29/+1t8+umnCAQC4vExY8Yk/F2yxzgjR46M+Z1fFNvb21FRUZF2m9P9LQAR9BxxxBExy9XU1MRcfFPBX7+7u1tX4ywn2X4ZMmQIzjrrLDz++OP4xS9+ASCSwnI4HLjkkkvEclu3bsWHH36YMoiM99wkY/To0aKCavv27bjllltw6NChmMBi69atYIxh/PjxSdehxpSc6XNKBf/8jjrqqITnJkyYgLfeeivmMYfDgeHDh8c8tn37djQ1NcWkTuPR+h6HDx8eE2QDEbP3iBEjEh4D5Pep9TsGZL/vckHL/ti9ezduuukmPPvsswnb1NnZGfN7ss+HE/8+gch7VfM+1fztrl27MHv27ITl4s8NhLlQsEOYDje1XnbZZcIsGs+UKVMARIyLV1xxBS666CJcf/31qK+vh91ux/Lly7F9+/aEv1Pe/cWTqrqCMZZxm3P5WzVMmDABAPDRRx/F3DmmQpKkpK8dCoWSLp9qv1x66aX41re+hQ0bNmDatGl4/PHHcdZZZ2HIkCFimXA4jC984Qv44Q9/mHQdRx55ZMbtLS0tjSnzPumkk3Dsscfixz/+Me68807xOpIk4cUXX0y6v9X4Q4z+nDhutzurFgVa32Oq95PpfWr5jqldpxGo3R+hUAhf+MIX0NbWhh/96EeYMGECSktLsW/fPlxxxRUJRvl0n4+VzwOEcVCwQ5hOXV0dysvLEQqFMvY5efLJJzF27Fg89dRTMXe4yVIF+WTUqFEAIgZGpYrS2tqq6o7xggsuwPLly/HQQw+pCnaqq6uTyu5coVDLRRddhP/93/8VqazPPvsMy5Yti1lm3Lhx6Onp0bUnzZQpU3DZZZfhz3/+M37wgx9g5MiRGDduHBhjGDNmjKoASk/457dlyxaceeaZMc9t2bJFPJ+OcePG4eWXX0ZbW1tKdces96jlO6aFeJUpV9Tuj48++gifffYZ/va3v+Gb3/ymePyVV17RdXv0YNSoUTFVbJxkjxHmQZ4dwnTsdjvmz5+Pf/7zn/j4448TnleWdPM7KeWd05o1a7B69WrjN1QDZ511FhwOB+65556Yx5Xl2+mYPXs2zjnnHNx///145plnEp73+/34wQ9+IH4fN24cPv3005h9tXHjRrz99tuatruqqgpz587F448/jkcffRQulwsXXXRRzDJf+cpXsHr1arz88ssJf9/R0YFgMKjpNTk//OEPEQgE8Lvf/Q5AxENkt9vxs5/9LOFOmTGG1tbWrF5HDccddxzq6+tx7733wufzicdffPFFfPLJJzjvvPMyrmP+/PlgjCUt5+fvx6z3qOU7poXS0tKElFEuqN0fyc4DjDHccccdum2LXsydOxerV6/Ghg0bxGNtbW14+OGH87dRBCk7hHE88MADCf0rAOB73/sebr31Vrz++uuYNWsWvv3tb2PSpEloa2vDBx98gP/+979oa2sDAJx//vl46qmncPHFF+O8887Djh07cO+992LSpEno6ekx+y2lpKGhAd/73vfw29/+FhdeeCHOOeccbNy4ES+++CKGDBmi6o7473//O84++2xccskluOCCC3DWWWehtLQUW7duxaOPPooDBw6IXjtXXnklfve732Hu3LlYuHAhWlpacO+992Ly5MmqDNdKvvrVr+Kyyy7Dn/70J8ydOzfBM3T99dfj2Wefxfnnny/KbHt7e/HRRx/hySefxM6dO2PSXmqZNGkSzj33XNx///248cYbMW7cOPzyl7/EsmXLsHPnTlx00UUoLy/Hjh078PTTT+Oqq66KCfj0xOl04te//jW+9a1v4bTTTsPXvvY1UXo+evRoLFmyJOM6zjjjDHzjG9/AnXfeia1bt+Kcc85BOBzGm2++iTPOOAOLFy829T2q/Y5pYcaMGXjsscewdOlSzJw5E2VlZbjgggvS/s22bdvwy1/+MuHx6dOn47zzzlO1PyZMmIBx48bhBz/4Afbt24eKigr885//NNRPlC0//OEP8dBDD+ELX/gCrr32WlF6PnLkSLS1temujhEqMaPkixhc8LLNVD979uxhjDF28OBBtmjRIjZixAjmdDpZY2MjO+uss9h9990n1hUOh9mvfvUrNmrUKOZ2u9n06dPZ888/n7L0+De/+U3C9vDS80OHDiXdzvjy0mSl5/Fl9Lxc9/XXXxePBYNBduONN7LGxkbm9XrZmWeeyT755BNWW1vLrr76alX7rq+vj91+++1s5syZrKysjLlcLjZ+/Hh27bXXxpTLMsbYQw89xMaOHctcLhebNm0ae/nllzXtF05XVxfzer0MAHvooYeSLtPd3c2WLVvGjjjiCOZyudiQIUPYiSeeyG6//Xbm9/vTvqfTTjuNTZ48Oelzb7zxBgPAbr75ZvHYP//5T3byySez0tJSVlpayiZMmMAWLVrEtmzZIpbR8j7j15+uNcJjjz3Gpk+fztxuN6upqWELFixge/fujVnm8ssvZ6WlpUnfTzAYZL/5zW/YhAkTmMvlYnV1dWzevHls3bp1McupeY+p9luqcm4AbNGiRTGPqfmO8WP5iSeeiPlbvk+V5ew9PT3s61//OquqqmIAMpah83LtZD8LFy7UtD82b97M5syZw8rKytiQIUPYt7/9bVE2rtzGdJ9Psn3EtzPZ9z7+3JBsvydrA7F+/Xp2yimnMLfbzYYPH86WL1/O7rzzTgaANTc3p91nhDFIjJGziiCMoqOjA9XV1fjlL3+p27gHgiAKj+uuuw5//vOf0dPTQ6Mo8gB5dghCJ+Lb1gMQ3YWTjXYgCKI4iT8XtLa24h//+AdOPvlkCnTyBHl2CEInHnvsMaxYsQLnnnsuysrK8NZbb+H//u//cPbZZ+Okk07K9+YRBGESs2fPxumnn46JEyfi4MGD+Otf/4quri7ceOON+d60QQsFOwShE1OmTIHD4cBtt92Grq4uYVpOZs4kCKJ4Offcc/Hkk0/ivvvugyRJOPbYY/HXv/4Vp556ar43bdBCnh2CIAiCIIoa8uwQBEEQBFHUULBDE
ARBEERRQ54dROaz7N+/H+Xl5dTwiSAIgiAKBMYYuru70dTUlHZeHQU7APbv358wRZggCIIgiMJgz549KSfdAxTsAADKy8sBRHZWRUVFnreGIAiCIAg1dHV1YcSIEeI6ngoKdiBP8q2oqKBghyAIgiAKjEwWFDIoEwRBEARR1FCwQxAEQRBEUUPBDkEQBEEQRQ0FOwRBEARBFDUU7BAEQRAEUdRQsEMQBEEQRFFDwQ5BEARBEEUNBTsEQRAEQRQ1FOwQBEEQBFHUULBDEARBEERRk9dgZ/ny5Zg5cybKy8tRX1+Piy66CFu2bIlZZmBgAIsWLUJtbS3Kysowf/58HDx4MGaZ3bt347zzzkNJSQnq6+tx/fXXIxgMmvlWCIIgCIKwKHkNdlauXIlFixbh3XffxSuvvIJAIICzzz4bvb29YpklS5bgueeewxNPPIGVK1di//79uOSSS8TzoVAI5513Hvx+P9555x387W9/w4oVK3DTTTfl4y0RBEEQBGExJMYYy/dGcA4dOoT6+nqsXLkSp556Kjo7O1FXV4dHHnkEX/rSlwAAn376KSZOnIjVq1fjhBNOwIsvvojzzz8f+/fvR0NDAwDg3nvvxY9+9CMcOnQILpcr4+t2dXWhsrISnZ2dNAiUIAocfzAMp13KOBiQIIjCR+3121Kenc7OTgBATU0NAGDdunUIBAKYM2eOWGbChAkYOXIkVq9eDQBYvXo1jjnmGBHoAMDcuXPR1dWFTZs2JX0dn8+Hrq6umB+CIAqf7oEA5v5hFebf806+N4UgCAthmWAnHA7juuuuw0knnYSjjz4aANDc3AyXy4WqqqqYZRsaGtDc3CyWUQY6/Hn+XDKWL1+OyspK8TNixAid3w1BEPng8ff3YsfhXnywuwOhsGVEa4Ig8oxlgp1Fixbh448/xqOPPmr4ay1btgydnZ3iZ8+ePYa/JkEQxhIKM6x4Z4f43R8M53FrCIKwEpYIdhYvXoznn38er7/+OoYPHy4eb2xshN/vR0dHR8zyBw8eRGNjo1gmvjqL/86XicftdqOioiLmhyCIwubVTw5iT1u/+J2CHYIgOHkNdhhjWLx4MZ5++mm89tprGDNmTMzzM2bMgNPpxKuvvioe27JlC3bv3o3Zs2cDAGbPno2PPvoILS0tYplXXnkFFRUVmDRpkjlvhCCIvPPA2ztifveFQnnaEoIgrIYjny++aNEiPPLII/jXv/6F8vJy4bGprKyE1+tFZWUlFi5ciKVLl6KmpgYVFRW49tprMXv2bJxwwgkAgLPPPhuTJk3CN77xDdx2221obm7G//t//w+LFi2C2+3O59sjCMIkNu3vxLuft8Fui1RghcKMlB2CIAR5VXbuuecedHZ24vTTT8fQoUPFz2OPPSaW+f3vf4/zzz8f8+fPx6mnnorGxkY89dRT4nm73Y7nn38edrsds2fPxmWXXYZvfvOb+PnPf56Pt0QQRB548O2dAIB5RzeixGUHQGksgiBk8qrsqGnx4/F4cPfdd+Puu+9OucyoUaPw73//W89NIwiiQDjU7cOzG/YDAK48eQze/bwV3QD8IQp2CIKIYAmDMkEQRLY8smY3/KEwpo2owrEjq+GyR05rpOwQBMGhYIcgiILFFwzhH+/uAgB866TRAACXg4IdgiBioWCHIIiC5fmNB3C4x4eGCjfOPWYoAAp2CIJIhIIdgiAKEsaYKDf/5uzRcEbTVzzY8ZFnhyCIKBTsEARRkLy3sx2b9nfB7bDh68ePFI+TZ4cgiHgo2CEIoiB54K2IqnPJscNRXeoSj1MaiyCIeCjYIQii4NjT1of/bI40IeXGZI7LQX12CIKIhYIdgiAKjr+9sxNhBpwyfgiObCiPeU6kscizQxBEFAp2CIIoKHp8QTz23h4AwJUnjUl43k1pLIIg4qBghyCIguKf6/ai2xfE2CGlOO3IuoTnybNDEEQ8FOwQBFFQPL1+HwDg8hNHwxYd/KmE0lgEQcRDwQ5BEAVDry+Ij/d1AgDOmlifdBnRZ4eUHYIgolCwQxBEwbB+dweCYYZhVV4Mry5JugylsQiCiIeCHYIgCoa1O1oBAMePqUm5DAU7BEHEQ8EOQRAFw5odbQAyBDvCsxMyZZsIgrA+FOwQBFEQ+IIhrN/TAYCUHYIgtEHBDkEQBcGHezvhD4YxpMyFsUNKUy5HfXYIgoiHgh2CIAqCtYoUliQllpxzhLJDpecEQUShYIcgiIJA+HVGp05hATT1nCCIRCjYIQjC8gRDYazbyZWd2rTLUp8dYrASCIUpyE8BBTsEQVieTfu70OsPocLjwITG8rTLkkGZGIyEwwzn3/kW5v5hFYKUwk3Ake8NIAiCyITSr5NsRIQSGhdBDEba+vzYcrAbAHCox4ehld48b5G1IGWHIAjLo6a/DoeUHWIw0tLlE/9v6/XncUusCQU7BEFYmnCY4T2Vfh2Agh1icHKwe0D8v703kMctsSYU7BAEYWk+a+lGZ38AJS47JjdVZFzeTaXnxCDkkFLZ6SNlJx4KdgiCsDTcrzNjVDWc9synLJfdDoCUHWJwcbBLqexQsBMPBTsEQVgatf11OJTGIgYjLd2ystNOyk4CFOwQBGFZGGMxlVhqoGCHGIyQspMeCnYIgrAsO1v7cKjbB5fdhqkjqlT9jWgqSJ4dYhChVHba+sigHA8FOwRBWJa1O1oBANNGVMHjtKv6G+W4CMaYYdtGEFbikDKNRcpOAhTsEARhWdZ8HklhzRqrLoUFyMoOAARCFOwUKu29fvzm5U/x+aGefG+K5WGMoUVRek59dhKhYIcgCMuipZkgx60Idqj8vHB5duN+3P36dvzx9W353hTL094XiAnsyaCcCAU7BEFYkr3tfdjX0Q+7TcKxI6tV/51LUZ5OJuXCpXsg4jvZ196f5y2xPkpzMhBRdiiFGwsFOwRBWBLeNfnoYZUodasf42ezSXBE52dRsFO48M8u/kJOJMLNySNrSgAAvmAY/YFQPjfJclCwQxCEJeEl57M0pLA4VH5e+Piin11z1wCpFBloiQaEo4eUimOffDuxULBDEIQl0dpMUIkIdkJ0d1uo8GBnIBBG10Awz1tjbbiy01DuRk2JCwDNx4qHgh2CICzHoW4fPj/UC0kCZmYT7ER9Oz5SdgoW5WdHqaz0cGWnvsKN6tJIsEPzsWKhYIcgCMvB/TpHNZSjssSp+e8pjVX4+CnYUc3B6BDQhgoPakoj3xfqtRMLBTsEQViOXPw6AAU7xYAvKKcgmzsp2EkH77FTX+5GdTSNRZ6dWCjYIQjCcnC/zqyxtVn9veiiTH12ChZSdtTDPTt15R7URNNY1GsnFgp2CIKwFJ19AXza3AUgO78OIDcWJGWncIn17PjSLDm4YYyhRaSxSNlJBQU7BEFYinW728AYMLauFHXl7qzWQWmswkf52TWTspOSzv6AUDDryt2k7KSAgh2CICwFv0sdU1ua9Trk0nMKdgoVpWeH0lip4apXdYkTboddrsYiZScGCnYIgrAUPEBRDvTUCpWeFz7KQJWCndTI5mQPAFCfnRRQsEMQhKXg
6Yucgp08pLFaugZw20ufYm97n2mvWcwoP7tD3T4ESaVLCldC6ysiKd/qaOk59dmJhYIdgiAshVB27LkEO/bIukwMdh5Zuxt/emM7/r56l2mvWcwoVbkwAw730MU7GQfjlR3u2aFhoDFQsEMQhKXgAYpThzSWmZ6d1ujFmJq56UN8oEqprOQkKDvRNFYwzNDtozEbHAp2CIKwFCKNlZOyY34aq2sg4pHo89M8Lj3gyk6pK6LSUUVWcrhnpyFauehx2lES3WcUeMtQsEMQhKXgAYo7B2UnH312uvojwU6vn+6m9YB/diNqSgDI85+IWGRlxyMeo147iVCwQxCEpdClGisPpefd0cncfT5SdvSAl56PjAY7pOwkh3dPrlf0pKJeO4lQsEMQhKXQJY1lz18ai5Sd3AmHGQKhiLlWBDud1EU5HsaY8DI1KJSdqujw3DYqPxdQsEMQhKXgakxOBuU8Kjv95NkRfLyvE7/7zxbN+0T5uY2sjaaxuknZiadrICi8TXVJlJ0OUnYEFOwQBGEpCtagTJ6dBG57eQvufG0b3tjSounvlGXnI4SyQ8FOPNzHVOl1wuO0i8fJs5MIBTsEQVgKXZoKmpzGCobC6I2qF+TZkTkU9ZPwFJ9auF9HkoAR1V4A5NlJRjK/DkCenWRQsEMQhKXQw6DsNFnZ6VH0M+n1B6mZWxSudmkd26FU97gXpXsgiD5SzWLgfh3eY4dD87ESoWCHIAhLoUvpuclNBblfB4h0+6WZXBG4Z8QXyC7YcTtsKHM7RN8YPvSSiMCVnYZyT8zjNB8rEQp2CIKwFAFdxkWYq+x09sdeVHqpcy0CitSecoK5GnwilWmHJElojKo71EU5Ft5jpy5B2aH5WPFQsEMQhKUQ4yIKKNhRKjsAdVEGYgPAbNNYXN1roGAnKQdF9+Q4ZUcxH4uIQMEOQViYwThJ26ejQdlnUhor3oBLFVm5BTu+hGAnolxQRVYsh+LmYnFEGqvPj3CY/GNAnoOdVatW4YILLkBTUxMkScIzzzwT83xPTw8WL16M4cOHw+v1YtKkSbj33ntjlhkYGMCiRYtQW1uLsrIyzJ8/HwcPHjTxXRCEcfzf2j340xvbcf+bO/K9KaahawflPCk7vVSRhY4+RbAT0NhnJy7gbajkyg55dpTETzznVEWDnTDTXglXrOQ12Ont7cXUqVNx9913J31+6dKleOmll/DQQw/hk08+wXXXXYfFixfj2WefFcssWbIEzz33HJ544gmsXLkS+/fvxyWXXGLWWyAIQ2nrjZzc93f053lLzEOX0nMR7JgTdHTFeXaosWDsPtGu7ET2n1B2yimNFQ9jTHh2GuKUHZfDhnK3AwBVZHEc+XzxefPmYd68eSmff+edd3D55Zfj9NNPBwBcddVV+POf/4y1a9fiwgsvRGdnJ/7617/ikUcewZlnngkAePDBBzFx4kS8++67OOGEE8x4GwVJvz8Er8ueeUEir3RHja686mIwoGtTQY1pLMYYBgJhzd+NBGWH0ljo6JcvslmXnkc/x8aoskO9dmR6fEH0RxWzeGUHiJSfd/uC1GsniqU9OyeeeCKeffZZ7Nu3D4wxvP766/jss89w9tlnAwDWrVuHQCCAOXPmiL+ZMGECRo4cidWrV6dcr8/nQ1dXV8zPYOKljw9g8s0v4fH39uR7U4gM8IvoYJr4HNAjjZVlU8EfP/0xpv/iP9jTps0jFZ8qoH4wQKcyjZVlNZbbEQk6yaCcCE/plXscSYNzudcOpbEAiwc7d911FyZNmoThw4fD5XLhnHPOwd13341TTz0VANDc3AyXy4WqqqqYv2toaEBzc3PK9S5fvhyVlZXiZ8SIEUa+Dcuxdkc7wgxYv6c935tCZKAnGuwc6vENmkZ1eig77iw9O+t2tWEgEMbH+zo1/V18Gos8O0BnvxzwDWTZZ8cVZ1Bu6Ro834NMtAi/jjvp8zXRYaBUkRXB8sHOu+++i2effRbr1q3Db3/7WyxatAj//e9/c1rvsmXL0NnZKX727BlcCgeXNclXYH14Z95AiKG9b3DcoeXToMwvyh392vZ1Yuk5KTuxaSytyk6sZ4enafyhMHlQosh+ncQUFqBQdiiNBSDPnp109Pf348c//jGefvppnHfeeQCAKVOmYMOGDbj99tsxZ84cNDY2wu/3o6OjI0bdOXjwIBobG1Ou2+12w+1OHg0PBvjJol9jhQRhPt2K9MjBrgHRP6NYCYcZAqHInbsuwY5Gz85A9DvRoTGw5GmsCo8DXQNBUnYQV3quUdmJbz/gcthQW+pCa68fB7t8qC0bvOdvTmZlh3rtKLGsshMIBBAIBGCzxW6i3W5HOBz5IsyYMQNOpxOvvvqqeH7Lli3YvXs3Zs+eber2FhJC2dF4AiLMRzlzaTCYlJXBiR6enUCIaeozwm8AlKqEGriyw420pOzEe3Y0prFCiSNDyLcTS4vosZNB2aFgB0CelZ2enh5s27ZN/L5jxw5s2LABNTU1GDlyJE477TRcf/318Hq9GDVqFFauXIm///3v+N3vfgcAqKysxMKFC7F06VLU1NSgoqIC1157LWbPnk2VWGngB/8ApbEsjzI9MhhO8gFlsKNDNRYQuXB6bOqqq7gC0ZmlstNQ4cFnB3vEmITBTGxTQY1prEBiKrOx0oPNB7oGxfdADQdTTDzn0OTzWPIa7Lz//vs444wzxO9Lly4FAFx++eVYsWIFHn30USxbtgwLFixAW1sbRo0ahVtuuQVXX321+Jvf//73sNlsmD9/Pnw+H+bOnYs//elPpr+XQoJL9JTGUo8/GIYk5TbCIJvXVN4RHxoMyk7QoGDHmTnYCYWZUBS0prGEshO9y+6j2VgxvqfslR35cxNdlAsg2DGjtUeLmHieQtkpIWVHSV6DndNPPz2ts76xsREPPvhg2nV4PB7cfffdKRsTErH4giGRGqFgRx2+YAhzfrcSlV4nnlt8MiRJMuV1e+IumIOh/Jxf5Bw2CTZb9vtZGSipNSkr1QctaSzGmKjGGhpNY5Gyk6NnJ4myUyhprH+s3ombn92EP3/jOHxhUoNhryNPPM+k7AyOwoZMWNazQxiD8o6VqrHUsb2lF3va+vHxvi7Nhtdc6Imr8BkMrfL16J4MAJIkae61o/w+aFF2BgJhBKO+ID7WYLB/txhjOfXZ8Ydiq7EAKCafW/t78O+PmiOtPXYb29ojk7JTwyefk7IDgIKdQYfywB8gZUcV2w/1iP/3mVhlE9+ojldfFDN6BTvKdagNdgYUy3VqKD3nn5NNAuqiVUKDvYPyQCAcc2OgeVxEGmXHysNAQ2GGjXs7ACQqs3rS4wsK9TCVZ4ensTr7AwiaeJNmVSjYGWQoyxApjaWOmGDHxH0Wf7K0+h2tHogeOzp4o7SWnyuDfy3KDm8PUO5xoiw6j8jMoNiKxKcBs/XsKI+DQkhjfXawG33RICRemdUTruqUuR0odSd3o1R6neAZd619o4oRCnYGGcoGUwOBEHUjVcH2Q73i/2YaT/nJckhULTjUXfzdY7kKo4cRPJc0Vn8gpFr55J2CK7wOlEQ
vPINd2eHKmNMeudqGwiym0i4TXNlxOxMNyq29ftOm2Wtl/e4O8f8uI4OdDJVYAOCw21DppS7KHAp2BhnKgz7MtDddG4xsb5GVHTONp92+yAVjbF0pgMhnpbVKqNDwi5lI+ik7alWFeF+J2lSWUHbcTpRGK3D6Brlnhx+ndYrmf1rUHVGNpQh6a0pdIoC1akpX6dPp8Rn3XT0o/DrpmyvWUEWWgIKdQUb8ULgBPwU76QiHGT4/rEhjmXjHzpWd2lIXqqNzboq9saAeoyI4mj07cRVDagNLfgcfo+wM8tJzHijWKcyzPg0pYDEuwikfB5IkiYu7VVO66/d0iP8b6dk5JJSd5OZkTjVVZAko2BlkxDeYIt9OevZ39sdcBM30YnRHT5Zlboc4qVn1jlYvdDUo27V5duIrqDpUNmPjZeflHidKomkXXzA8qE2hvBKrusQpPgdNyk6KYbBW9u109gewTaECx89L0xM1aSxANilTY0EKdgYd8XImBTvpUfp1AHO9GPxkWeZxWP6OVi/0mHjO0V6NFRfsqE5jRZUdjxMlbtljYqaZ3WpwZafS6xQpyWyCHaWyA8jl51asyNoYVXUc0f5QRhqUebCXaggoh8rPZSjYGWQkKDuD3FuQic8VlViAufuLnyzLPc7Bo+xYKI2ldmREl6jGcsBlt4mL3WCuyOLBTpXXKQIWLb12xCBQe2wXYqHsWPB7wM3Jx46sBmCwsiPmYmVQdkppGCiHgp1BBik72tgeF+yYaVDmOf9yt6zstAwSZUePaiy3KD1X95nFfxfUdlHmBuUKrxOSJKEkalIezBVZfN9FlJ1oak9DF+VUyg6vyDpoQWVn/Z6IOfmU8UMARAJ3rc0U1XJQTDzPoOxwgzKlsSjYGWzER/jUWDA921siaSxv1IvRb2oaK3IRLfM4REt4UnbUo7X0PN5Aq9qgzEvPPRFzMu97MphVU16OX1niUig7GkrPU6Qz+VR5q83HYoxhQzSNdXI02AGMS2UdImVHMxTsDDJ4hF8b/RIM5hOyGriyM7mpAoDJpecijeUQLeEHi7KTnzRWtp6dqLLjifgjhLIziCuyuLk7RtnJIo0Vr+yIdK7Fvgc7W/vQ0ReA22HD5KZK0YLAiFRWnz8oihcyGZRlZYeqsSjYGUT0+0PCl9BU5Y08NoiUnWAorKkpX9dAQFQ9HD2sEoDJTQVjqrGi8n2xKzvBxP4q2aK1zw7/Ltijnhv1nh259ByQlZ3B3GunS+nZiX4O8Z6odPDAKJ2yY6UGm7y/zjHDKuFy2FAWVfmMKD/ngV6Jyy46dqeiupSaCnIo2BlEcFXHZbehtiyq7AyCYOf9nW349t/fx/j/9yJ++5/PVP/d59FKrIYKt5CLzbyAKZWdBoWyY6WTvN4EjEhjqR4XEVmON8LT6tkpj1d2BrVnJ1qNVaKsxlL/3Uml8DUovofdFlLOuDl52ogqAPKxYISyoyw7l/g8iBSI0nMKdijYGUzwA7661ClOyMXq2QmHGV7e1Iz597yDL927Gq9sPgjGgH9t3Kd6Hbxz8ri6MpS6zL9bFwZljxN1UWXHFwwLj0gxouu4iCzTWHxyuXbPTuQCJ44VqsaKVmNpMygzxuQ0liO2GqvE5UB5VDVpsZBvh5uTp0crsbji0j2gf/roYIZp50pqonaFbl/QsiM2zIKCnUEEr8SqLnHBIwy3xXVCHgiE8Mia3Zjzu5X433+sw7pd7XDZbfjyjOGQJGBPW7/oPpoJ7tcZV1eWl7t1bm4sczvgcdrFnJtiNin78lh6zlXOoRXagp1uRek5AHgHubITDrOc+uwEQrJymew4kHvtWMO30+8P4ZMD3QCA6SOrAMjHgiFpLJUNBYFIAG7jw0AHeUUWBTuDCN5jp6bUJVcXFZGy88ia3Tj516/jx09/hM8P96LC48CiM8bhrRvOwG++PBXj68sAQFRNZEIOdkpRYrKyMxAIifQLz//Xi4osa5zkjSCfBmWuPHBfiJrZWMFQWJjWK7xxyo6Ox8re9j787pXPCuKC1e0LgmdaK7za01jK5ZLNSOOfj1W6KH+0rxOhMENDhRtDo9tmaLDTpa7sHABsNkmksgZ7+Xl6dxNRVAhlpwiDnWAojJv+9TGCYYZhVV5cefIYfHXmiBgD37QRVfjsYA/W727HFyY1ZFwn7548rr4MwXDk7G3WbCzlSbIsevFsqPBga0uPZU7yRqBnB2W3Zs9ONI0VVQ56fEEEQuG0KTXl58QvcLyLsp7VWH99awcefHtnJOj5yjTd1msE3NjtcdrgcdoV1VjqPgdlcJrsOOAXeauUn3Nz8vQR1cJDI6exjFN2GjKUnXOqS11o7fUP+i7KpOwMIrhnp6bEJaT2gSJJY/X6QyIg+e/S07Dw5DEJlQo8n87NhOkIhMLY1RoNdurKxMwjs3wYyhSWLapDk7KjjWzTWBHjZ+SxTOoO9+t4nXYRFBmh7PDv7vMbD1g+jSn7dSKKguizo9KzI3ot2W3i2FfSWMlHp1hjP/DzCU9hAUCZ20iDsrqJ55waYVIe3OXnFOwMIriMWV2q8OwUibLDFRenXRKBXDz8ZLRxbwdC4fQVTXva+hAIMXiddjRWeEwvJ+5WBDucugprneSNgFdjJUtfaCVbg3Kp2yHMxpl8O11xfh1AVnb0VAF5qswfCuPhd3frtl4jUHZPBgCPxj47PChKFfBaaRgoYwwf7I41JwPKNJYRBuWosqMijQXI5eeDPY1Fwc4ggkf2NSVORRqrOBz6vVHFhXtrkjG+vhylLjv6/CF8drA77fp4CmtsXSlsNvNHAHT7Ei+iDWI+VhErOyEdq7H4tG2Npecepw1VJZELRGeG8vMuxagIDld29GxAqUyJPbxml2FjCPSgU1F2DkBzB2V/hoCXBzvNFmgseKBzAC3dPthtEo6J9uIC5O+tIcpOl0Zlh7ooA6BgZ1AR49lxFVc1Fr+LLk2h6gCRZnFTo30wMqWylJVYAGIMymb0uelRTDznyPOx8n9HaxT6prHsMevMBJ967nHaUeVVqez0y72QODww1rMBpTJwOtzjx3MbD+i2br3h+4wrO5oNyhmUHV6NZYX5WPw8MnFoeYyizBVZvcdFDARCoollnVplhxuUKdghBgvJqrGKpc+OUHYydBTlqawN0b4YqVD22ImsN7K/QmGm2vCaC8nSWKKxYBErO6lmImWD5jRWNKDwOu2ojF4gMgU78aMiALmDsp7KDg+c+JDJB97aYdnmksoeO4DcK0dtB2U+uDWTsnOox5cxHW00/DwyfUR1zOOiqaDO1Vi8e7LHaROz2DIhlB1KYxGDhaR9dook2OkPZFZ2APmkpFrZqS8FAGFQBswxKfMqH+VFVBiUi7iLcl5nYwV5Gkuh7GQyKA+kUXZ0THlyr9j/nDIWHqcNmw90Yc2ONt3Wrydd/amUHW0tAFIdA0PKXLBJkRuP1p78Bv7xnZM5ZQalsVoU084zdU/mkLITgYKdQQJjTNyl1hRhGkuNZwcApkWVna0tPSkrbRhjctl5VNlx2G3i5GuGb0c5F4vDS277A9Zqla8n+R0Xwd
NYCs9Ohrvh7iSenRIDOijzY25YlQeXHDscAPDg2zt0W7+eJKSxRDWWyjRWhmPAYbeJjuIH8+jb8QfD+GhfJ4DYSixAkcbS2aAszMkq/ToAKTscCnYGCb1+uUlddUnxpbH4XXRJBmVnSJkbI2tKAAAf7u1Iukxbrx+d/QFIEjBmSKl4vNTEAJEbX5WeHa/LrmiVX5ypLGXZca64NSg7jDGhcnq1KDtpPDt6BsV9imD+WyeOBgD8Z/NB7G7t0+019EKksUpi01halZ34URFKZJNy/nw7nzZ3wRcMo9LrjDlPABApJr09O0plRy3VpVR6DlCwk1d8wRCCJvg/ANmJ73Ha4HXZi66poFrPDiDfhaVKZXFVZ3i1V6T7APmOXU8vRip6kqRHAGUqK//mTCPIVxrLHwqLrr9unTw7eik7/mBYBIGlLgfGN5Tj1CPrwBjwt9U7dXkNPeGl5xVZGpTVBLxWCHaU/XXiU0rKNJaeKWeu7NSpGBXBqaE0FgAKdvJGIBTGnN+txHl3voWwCSa7NkVDQQDwuiIffbEEO2qqsTjTRUVWcpNyfCUWx4gqm1QkS2MBxW9SNiTYUXFDoTTPepw2DZ4dHuzIn1OpQtnR40Kn9P7w9POVJ40GADz23h5Dhk3mQmdU7aqKnmu0enb4McDTX8losEBlorJzcjz8exsMM9XvWw3Nnf0A5POAGnifnf5AqGhsC9lAwU6eaO4cwJ62fmw52I3PWtL3fNEDZUNBAEU3CJSrLZk8OwAwjXdS3tOR9GIUX4nFKTGxsWB3JmXH4l10s0XPcRHCs6PiYsPTuTYp8nfqPTtRI7nSsxM9TsJM/QU+HfzYdil8Y6eOr8PYulL0+IJ4ct3enF9DT/g+kz072qaecwUo3TEgDwPNY7ATnbEX79cBIgocF3v0Min7g2G88dkhAMDRwypU/12Z2wGnPbIxg9m3Q8FOnjisqCJYa0JVhRgVEQ12eBrLFwyboiwZDVdbSt2ZlZ1JQyvgctjQ0RfAziSeh1TKTqmJjQXlNJYz5vF60T22SJUdA6aeqwk4ZHOyHZIkiWBHvbIjf05eZeWeDoFxsmPbZpPwrZPGAABWvLPTUt/h+NJzj9Y0llB2rOvZae3xYVf03DE1rhILiHw+fKadXsrbG1ta0NEXQH25GyeOG6L67yRJooosULCTN5QHnRklpMqycwAxDbAGLNyNVS1alB2Xw4ajmyJ3RslSWXIlVqzpsMREg3J3ijRWsc/HMsazk/nzkrsnRz7jSq9az06iAme3SSLg0WMYaKpje/6xw1DhcWBXax9e+7Ql59fRg4BiCnyCsqPWoKxC3RPp3DwF/Ruiqs4R9WXifcZTpvPk86fX7wMAfHFaE+xJZoalg9/kZjqeixkKdvJEa48c7Kzd0WZ43xRlQ0FAnlcDFEcqS3h2VCg7gDzHhp+0OAOBEPa0R+7YxtXHe3bMMyh3J6nGApTKTpGmsXSsxtJSeq6sxALkSqKugUDaxnW8p0xF3AWvVMzHMkbZASLH49dmjQQAPGCRMnRlO4dEg7J+np3GyvwqO8KcnETV4ZTrWJHV2RfAq59EAtqLpw/X/PdC2aE0FmE2rQpl51C3L2k6RU/aomWH/KC32SRxEioGk3KfBmUHSF2RtbO1F4xFDKe10cCQIy5gJhqU47ukNkSVnUOk7GRES+k5T2PxCyy/W2csdRqCMZbSWyUHxsYpOwDwzdmjYbdJeGd7Kz450JXza+UKVw7KPQ6hPvDPQW2bCzWeHa7sdPYH8tI+Y320c/K0JH4dDldlu3QIdl746AD8oTAmNJZjUpN6vw6H5mNRsJM34jt/rt3RaujryZ4dhbfAVTy9dniZr5pqLEBWdj450BWjbG1viaaw6ssSykm9zqhB2eD9xRiTZ2O5k3t2irH0PBgKg4soeo6LCDNkbPEgPDtRxdNpt4mLVSrpvz8QQjC6wRVx3iq5ci/3Y6U3jR9tWJUX50xuBACseHtnzq+VK/E9dgDtaSw1yk6FxwFP9HmzVc5QmGHjnmgzwSSVWJyy6DGhRxrrqQ8iJvRLjh2W1d+LyecU7BBmww86fkI12rcTX40FyJJ9v7/wJ5/zO2ivymCnqdKD+nI3gmGGj/d3isdTmZMB85SdgUBYXEQT0lhRZafXH9LNC2AVlOkmPT078etOBg92lMdPZYbyc67q2G1SQjNLPRsL9oqGmclVyytPHg0AeHrDvpRdwc2CT4lX+liUCpuadD0PitxpAl5JkvJWkfX5oR70+IIocdlxZEPieYIjp7Fy+0x2t/bh/V3tsEnAF6dlF+zwliNUjUWYzuFosHPWxHoAwJrPjQ122uP67AAoqsaCPI1VqqKpIBA5WcqpLNmknC7Y8YoLmLH7qzvaYl6SEpWqUrdDBMjFpu4EgvKFUM9xEUDmVJZsUJb/RlRkpbhAcL9OuceRoAKKxoI6BDuZVMtjR1ZjzJBS+INhfJCid5RZyJVY8nlGOdBTjbqjNpXJb9wyVczpDa+EHFFdAkeagKzcrc98LG5MPumIIZr66yjh+4qUHcJ02nojX5gvTGqA3SZhX0c/9rYb59tpT6LsuIso2OFSf6ZxEUp4Kkvp25GDndKE5Uujd9ZGG7p7FBPPkw37q7fAXCAj8IXk/erQWG2SDIfdBr6azMFObBoLkIOdVGoJLzuP9+sACmVHjzSWMN8nD+QlScKxSY7lfBA/FwuIHfugJtgRyk6acRGAYv6UziMZMtGnUkWW52Nlv32MMTy9PpLCunh6dqoOQPOxAAp28gavxhpZU4Kjh1UCAN7baYy6Ew4ztCuGgHK80bvY4qjG4ne/6pQdQNlJuQNAZD8pPTvxGDHzKBnC9Jri4lZfoa6xYDjMcPfr2/Dm1kP6bqBBKO/o1U50zoTaXjs84PcoLmBVGcrPufE03q8DyMehLsqOCtVyWhKVMh/wwLBS4dlx2iXRYE9Nrx21yk65zqXdauHHSqYbK94jK5ehvev3dGBnax+8TjvmRr1Z2SD32aHSc8JEGGOiGqum1IVZY2oAGNdcsHsgKMpnlcbBYjEoM8ZkX4PK0nMAOGZ4Jew2Cc1dAzjQ2Y/mrgH0B0Jw2CQxLFSJEdOsk8FP3vENBTl8CGCmiqxVWw/hNy9vwU+f3aTvBhqEX4VXQytqy89FGkuhJlSKNFaKYKc/jbLj1lHZUaFa8sB9w56OvDYYFMGOQtmRJLnyU00XZVnZSX8c6KGcZIPayk/lfKxs4cbkc45uVJ2iTwZVY1Gwkxd6fEFxYq8tdeP40ZFgxyiTMjcnl7kdMdJwsXh2BgLyEEctyk6Jy4GjGsoBRNSdz6PNBEfVlsCZ5ILLL2B9AXOUnXhzMofPBcpUhbLqs8MACidPr2f3ZI4reryrTmMpPTvCoJx8/3WrUHb0+G6pUS0nNJbD47SheyCIzw/35Pya2dLZF9s9mePRUJElSs8zBjtR5cTkNJbalHm5OzeDsj8YxvMfHgCQWwoLUHh2+vyG93SzKhTs5AF+8Slx2eF12TFzdA0kCfj8UK8h/VNE9+TS5CegQk9jKdNK3jQt5pPBTcob9
nSkNScDitSEwcqOaCiYKo1Vrm4YKE9fdek8edkouEFZz2BHba8d3kVcOeVeno+VybOTGOyIPjs6qA5cuUinWjrsNkwZXgUA+CCPvp1kyg6gbfK5X61nR6SxzE3N9PvVprFyU55eV4yHOOkI9eMhksELU/zBsCmz/awIBTt54HDUr1NbFjkAK0ucQmEwwreTrBILKB5lhwcfJS47bBqNrbJJuV0OdpL4dfj6AeM9O3IaK71nJ52yc6CzH1ujA01DYVYQn7E/pO6OXgtqJ58P+GM7KAMKz06G0vMKb+LnpGsHZW5QzqBapmqUaSYdKYMdLcqOSs9OvgzKSdoUJCPXNNbTH0SqsC6aPkzzeIh4vC67UC0LRenVGwp28kCb8Ou4xWNG+naS9dgBisezw9NKarsnK+EXiA/3duLT5sj0+VTKjmgUZ1I1VspgR4Wy8+bWwzG/d/VbvycPv8glSyFmi9rJ58lKz7lnJ1UFi+zZSVR2RJsCPWZj+TIblAG5wV0+TcrJDMqAti7KfrWenXwZlFUqO2U5lJ539PnFvLNcU1icwd5rh4KdPMC7Jw9RBB+zxtYCMMa3k1HZKXBZs9en7uSTjDG1paj0OuELhvF+VFVLVnYOKHunhAxNC6UaAsoR1VhpSs8Tgh2dJi8biajC0TPYySWN5U2fxpI9O0mUHZd8rOSKrOykP7554P7Zwe68NZxMVnoOyN2Q9fXs5MugrO7mqjyHDsrPfyiPh5g4VPt4iGQM9l47FOzkAWUlFmdm1KT8aXNXypNrtqRSdjzFksbyqzMMJsNmkzAtWsnCi1jGplB2+N16KMxUt77PBnneUvJqLN5YrMcXTFraHA4zvL0tNthJNd/JSug5F4ujuvTcn8yzkz6NxQPIZAZlXTso82A+g7LTUOHBsCovwgz4cG9Hzq+rFcaYULuq4m6sRBpLRTUWTzlaVdnpzcKzo/XmiDcSzHY8RDKqSdkhzKZVeHbkNFZduRtj60rBmP6+nfYkwRUgX7wLPdhRK/OnYrpimN+QMnfCXSmnRHEhNFIN68mg7JS5HeJEm0zd2XygC229fpS67JjQGPGCFUIay5BqLLWl50Gexko0KHf0+ZOWc6f37OhnZler7AByv50Nezpyfl2t9AdCYj/nYlDmAZFVPTta01haPXO7WnuxLsfxEMmQlR3r3/gYAQU7eYB3T46fqi18OzoHO/zgrorLo/M0VsF7dnJQdgDZpAykTmEBkYoXftI20qQsqrFSeHYAZRflRJPyqmgV1uxxQ0SAWwhprIDKO3otqE5jJSk95xfsMAN6knzeXMWwirIDJDbKNBPu13HYpITAzK1SYQOsr+zIHZTTfx4lLrvo4K0lINNjPEQyargHjdJYhFnwNBavxuIcP8aYfjtctixaz04W3ZOVTIuW7AKpK7E4ZpiUe9J4QThi+nkSk/Kb0f46px45REjpXSbf/WaD3wiDssZgR1mN5XHKFSzJUsvp0o1Kf1cu+INhcfFXo+woR6CY3W5A6deJ74CtqRoroG1chNl9doSyk6HNhSRJYhvVfv8i4yH0T2EBsb12BiMU7OQBnsaKTysdPyZiUv54X6cuVRyc9t4Unp0iSWP1qehDko7KEifGRhWdVJVYnBIdjaepkNNYydNpQGplp88fxPu7IsHyKePrhOpQUJ4dQ4Kd9J+XrOzEHkPpRkYIz06SNJYcFOf2PVbeiKipNpzcVAGnXcLhHh/2tvfn9NpaSVWJBSgMymqqsVSmM8uj3w9fMJwxmNWTPpVpLEC7SXnj3k7sau1DiSu38RDJ4Nebth4KdgiTaI2msYYoPDsAMKzKi2FVXoTCTNfpxULZiffsCIOyeScKI8hV2QGA/zl5LI5sKMM5R6c/wYj+KQZK55k6KAOySTm+CeWaz9sQCDEMr/ZidG2JONkWgmdHbX8VLbi1jotwxr628O3EdVEOhOTmbEmVneixOBAIi1Et2cDTYC67TdV+8TjtmBSt3llvsm8nVSUWII/hyKTsBEPy/sqUxipV3NzoeXOYCRHsqEgrCpOySmVne7Q31vSRVVm10khHDSk7hJkwxhR9dlwJz88aq2+/nVCYiWqS6hRprIECT2PlquwAwNdnjcR/lpyGYVXetMvxPH2vgfusO800bQ5XduLTWNyvc8r4OkiSJFSHglB2DBkXoS6N1Z9C2eEX7nhlR3nxSjcbC8hN3enLYuabslGmmYhKrCTBjtrSc2VQmuk4cNht4hxmpm9Hi0dQTrWp+/7xG+G6uBthPaih0nPCTLoGggiEIncuSYMdnX07nf0BMTcqwaDsik49L/Q0ViB3ZUctpTqlJ1LBGJM7KKe5c0zVRZn31zllfKS9vFB2CsCzY8S4CO0G5bg0llB2Yi9WPIXlddqTeoxcdpvoeptLylNUGmo4tvPVSZmrX8mUHbXVWMrPSU06U49hm1rpS9JtOxVi+1QGY8kqdfWCZxJ4n7fBBgU7JsOj6jK3I+HECsi+nQ17OnSpkuKvV+FxJJyUi6bPjsrBfHpgtEG5zx8S/X7SprGSdFHe39GPbS09sEnAieMix1GFp5CUnWgzOQM6KPsypLF8Io2V3LPTGSf9pys7ByLm1BIduij3ZlFpyDspb97fparUWy86U/TYAdT32eHKj90mwaHiOCg3ubGgsseWJs+OymDsUDQQiS9e0QN+c93RH0Aww/ehGKFgx2RaMxzMo2tLUFfuhj8YxkYdcu6p/DpA8aSx5CZfxis7RhuU+UnbbpPS3jkmU3beiqo6U4ZXiQuO7NkpgGDHwKaC6ZSdUJiJ9En8Ppd77cQpO2lGRXD06KKspeycM6LGi9pSF/yhMDbt78r6tbXCg50KHZQdte0HzB4GqrwxVHO+0VoxxpWdeD+nHlSXuCBJAGNAu86NawuBrM8qfr8fW7ZsQTBofXncSiTrnqxEkiRRgq6Hb6ctRSUWUDxNBUXTtRw8O2ox2qAszMluR0L5rhJeet49EBQKIPfrnDpenpAse3as/z3N17gIpYIab1CuTJnGytwegPtsclF2tDQU5EiSlJdUFg8I03p2Mio72obBml1+zj8PSUo8VpJRrjEYk4tX9Fd27DZJ+DYHo29H81mlr68PCxcuRElJCSZPnozdu3cDAK699lrceuutum9gsSFysqWpI3c9mwummosFyHexwTATDd0KEXk2lvHKjtdprEFZNBTMcCdf7naIk21Lly9mRMQpR9aJ5SqEZ8f6d3L5MijHBDuO5GmsBGVnwFxlR2t38HyYlDtTTDwH1PfZ8WkMeM2ej6XssZPuZoSjNc12uDvz9SEXeCPbwejb0XxWWbZsGTZu3Ig33ngDHo/c3XHOnDl47LHHdN24YiRV92Qls6K+nXW72nMOQlLNxQJi/QmFrO5kc/ebLVzZ6TfIoCzMyWkUAyBy987Lzw92D2DT/i609wVQ5naIWV8AFH12rK/sGFF6rmZcBD/2XQ4bbLbYCxhPY3X2p/LspA529OiinO2xnY9OyrJnJ/s0Fj8G3CpUE0CRxjLp+ObBZ6buyZwyDU09GWOyslNuTLDDMwqtpOxk
5plnnsEf//hHnHzyyTGR7eTJk7F9+3ZdN64YOdyTvHuykvH1ZagqcaLPH8o5555qLhYQOQHxj7CQfTvZ+BqypcTg0vOeAXXBDqAoP+/yKUZE1MYY0Xkaq88fsrx6x6sU9UxjuVUpO1FzcpIgqypF6bns2Un9OekxHyvbY3vKiCpIErCvox8t3YkjRYwgrbKjtvRco7JjtkG5P6DNMF6mYX6XslI33c1wLgzmiizNV4dDhw6hvr4+4fHe3l5Vst5gJ12PHY7NJmHm6Bq8svkgfvn8ZoweknxeU0OFG9fNOTJte30+Fyu+xw4QUQe8Tjv6/CFxwi9EzFR29OqMmwqlZycT9dGKrINdA3gziV8nfj09A8GkCp9V4F2OnXlKYyWrjkzl2RHKTpo0lh7HSrbHdpnbgaMayvFpczc27O7A2Tp3401GuqaCWquxMo2K4Jhdeq6lezKgrYPy4WgAkqpSVw8Gc68dzWeV4447Di+88IL4nQc4999/P2bPnq1pXatWrcIFF1yApqYmSJKEZ555JmGZTz75BBdeeCEqKytRWlqKmTNnCp8QAAwMDGDRokWora1FWVkZ5s+fj4MHD2p9W6aRqntyPCdFS4ff39WOJ9ftTfpz9+vb8fKm5rTrkauxkp+UvUVQft6roaNprhhdet4t0lipL6IcXpG1MzolGYg0E1TisNvEhdLqvh1RiWOEQTmNqiXmYiW5gPGbhM6+QMysqS4VjR9LdVABe0RbBe3HNk9nmtFJORxmYp8kGxfhcapLY2mtyOMjVcxSdkSPHdXBjvrWD3IllnE3JDyjcHgQBjuav0G/+tWvMG/ePGzevBnBYBB33HEHNm/ejHfeeQcrV67UtK7e3l5MnToVV155JS655JKE57dv346TTz4ZCxcuxM9+9jNUVFRg06ZNMV6hJUuW4IUXXsATTzyByspKLF68GJdccgnefvttrW/NFFLNxYrn0uNHwumwpWzz/9a2Q3h7WyvWfN6G86c0pVyPqMZKouwAhd9rJxiS5+KY49nJPTWRjh4VoyI4XNn590cHxIiIUbUlCcuVe5zo9Ycs79sxxKBsz2yMldNYiccP95/4Q2H0B0Ii6OgeSF1mzfHqouxwg7L2Y3v6yCo8+t4eU0zK3QNB0bw0F4Ny1qXnJh3b/RqVHS1pLLktiTF+HUBOjw3G+Viag52TTz4ZGzduxPLly3HMMcfgP//5D4499lisXr0axxxzjKZ1zZs3D/PmzUv5/E9+8hOce+65uO2228Rj48aNE//v7OzEX//6VzzyyCM488wzAQAPPvggJk6ciHfffRcnnHCCxndnPKkmnsfjcdqxYNaolM+PGVKKt7e1ZixPT9dnB1CUnxeoZ6dPY9+LXPHqYDpNhxgVoUKlaogqO9wHxkdExFPhdaC5y/q9dozss5POrySnsRJf1+u0w2W3wR8Ko6MvII4xfhOSrvS8VJSe5+LZ4W0VtB/bvCLrw72dCIbCqpr0ZQv363id9qQpKB68ZGqUqrX03GzPjtzkUd3nUa6hgzJPYxmr7EQ9O72Dz7Oj6egPBAK48sorIUkS/vKXv2Dt2rXYvHkzHnroIc2BTibC4TBeeOEFHHnkkZg7dy7q6+sxa9asmFTXunXrEAgEMGfOHPHYhAkTMHLkSKxevTrlun0+H7q6umJ+zCAcZsIwnGtp4czRkRPZloPdYp3JSNdnB1A0FixQZYcrLA6bpOtFMhU8NWFUcKi2GguQlR1OvF+HU14g5ed+AwzKqjw70QusO4lPQpIk2bejMCl3R/umpPfscHO4DspOFoH8EXVlKHc70OcP4bODPVlvgxrSjYoAslF2tCknascx5IpmZccjB2PhDANhDxs4KoJD1VgqcTqd+Oc//2nUtsTQ0tKCnp4e3HrrrTjnnHPwn//8BxdffDEuueQSkS5rbm6Gy+VCVVVVzN82NDSguTm1l2X58uWorKwUPyNGjDDyrQi6BgIIhlPPxdJCbZkb4+vLAADvpejHEwiFReoiWZ8doPA9O9m0088FPcqJ08FP2qoMyhXySTEyIiJ5sFOhofw1nxii7NgzBzv9GWYdiYosRfk5V3bSe3b4sZKDspPD8W2zSZgqfDvGprLSlZ0D6quxfFmnscwJ5DUblKOeIsZiVehkCGXHwCICrhq1DsI0luazykUXXZTUSKw34XDkoP/iF7+IJUuWYNq0abjhhhtw/vnn4957781p3cuWLUNnZ6f42bNnjx6bnBEeTZd7HLqc0DN1WuYpLJuU2lvgKfQ0VpZN17JFGJQN8uyIaiwVBuUGhbIzdURVUmMoUDgjI0Q1ltkG5SCfi5X8dUWvHYWy06XCs1Mi/F05KDs5Ht9mdVLmqleq/SH67GS44Gv27JicxpKHgKr7PDxOGxzR3k2ZfDvCoGxQjx0AqIlmFDr7A5ZvRaE3mr9B48ePx89//nO8/fbbmDFjBkpLY8uiv/vd7+qyYUOGDIHD4cCkSZNiHp84cSLeeustAEBjYyP8fj86Ojpi1J2DBw+isTF1qaXb7YbbbdwBlQq9554cP6YGD6/ZnbLTcnuvPJjPbkveFsDrLOzJ52YrO8KgHAiBMaZ7u4UeFVU+nApvJGj2B8MJVVjxywHmNhbc1tKNp9fvw1WnjkuZ2ojHCIOymj47vjSl5wBQybsoR4NFxpiq0nM9qrFyPb7lYMckZSfHNJZmz47pBmVtn4ckSSjzONDRF0D3QACNlZ6Uy7aKhrPGXZuqvE7YJCDMIj3Y+NiZwYDmYOevf/0rqqqqsG7dOqxbty7mOUmSdAt2XC4XZs6ciS1btsQ8/tlnn2HUqIhxd8aMGXA6nXj11Vcxf/58AMCWLVuwe/duzWXwZsC7J+eawuJwZefjfZ3oHggklCvLlVhpKkYK3bPjz97AmQ38JMenH+vdD0N4dlS8H0mScERdGTYf6MKZExJ7X3Hy4dm567Vt+NeG/Wiq8qY12ivRelevBjWenYxprDjPTp8/hFA0HZ0uKOWzsXLx7ORiUAaAadEJ6NsP9aKzL5BS/cuVdA0FAfkzDYZZWrO09tJzOaAMhVnKmzq9EGksDdVxZe5osJNBfVLTcDZXbDYJNaUuHO7xo5WCnfTs2LFDtxfv6enBtm3bYta9YcMG1NTUYOTIkbj++uvx1a9+FaeeeirOOOMMvPTSS3juuefwxhtvAAAqKyuxcOFCLF26FDU1NaioqMC1116L2bNnW7ISSxzMOgU7Qyu9GFlTgt1tfVi3qx2nHxV7wctUiQUUfjWWPBfLLM+O/JXp84d0D3a6NZSeA8BdX5+Ofe39MSMi4snHyIjmzkjX3k4NqTPRQVnHYMepwrPDDcqpPst4zw7fj3ablPa402U2lkaPSDw1pS6Mri3BztY+bNjbgdOOTK0A5oJazw4QUfBSBTvZenaAiAqWTmnTAxHsaPjeR242+jOqT3I1lrFZh9pSdyTYGWS+nZzOKoyxmEZbWnn//fcxffp0TJ8+HQCwdOlSTJ8+HTfddBMA4OKLL8a9996L2267Dccccwzuv/9+/POf/8TJJ58s1vH73/8e559/Pub
Pn49TTz0VjY2NeOqpp3J5W4bRprLsXAuz0vh2+OtVpTAnA4XfZ0fuMGuOsmO3SeJEbEQXZXlchLqT9ri6Mpya4QLG1QczPTv82NMyhsTIqee+tKXn6ecxxXt2lA0F06Uxc/V3BRQ9pNQY1lPBS9A3GOjb6UzTPRmIra5K10VZ63w0t8MujhczUll9GkvPAVmlTXez4QvKfbCMLD0HlBVZ5pWfb2vpxqHu/Ja7Z3VW+fvf/45jjjkGXq8XXq8XU6ZMwT/+8Q/N6zn99NNFwKT8WbFihVjmyiuvxNatW9Hf348NGzbgi1/8Ysw6PB4P7r77brS1taG3txdPPfVUWr9OPhFNo3TMyaYzKaebeM4p+GosE+dicYzqohwOM/T41VdjqYWbRs1UdriqqOW44hd2XcdFKJSdVDdmfBtTpbEqS2Inn4uGghkC0lKRYsluvyuPr1x6SAnfjoEVWaL0PMW5xm6T4LRHAsN0vh2t4yKA2PJuo9HaQRlQbl/qmw2usjhskmqPW7bU5qEi66Z/bcLMW/6LZ9bvM+0149H8Dfrd736HG2+8EYsXL8ZJJ50EAHjrrbdw9dVX4/Dhw1iyZInuG1kstKqYi6UVPiF9494ODARi0yrpJp5zCt2zwy9UZnRP5pS4HGjvCwg/hV70+uUutGoMymoRyo5Jnp1wmKE9GhioDXYYY7JB2QBlB4ikyVyORCUm3WwsIDGNpabsHIgNirMxs3MVwWnPrYfU2CGRFhU8tWgEmTw7QCSACYSCaUdGZNN+oMztQFuv35Rgnh/PWtKK5Srmd7Uq/DpGz5isNVnZ8QfD+CBqkD96WIUpr5kMzWfUu+66C/fccw+++c1viscuvPBCTJ48GT/96U8p2ElDqwEGtBE1XjRWeNDcNYD1uzswOzpTC1BOPM/c0r5wPTvZzw7KFt4ZV+99xu9MnXZJV5Ou2Z6dzv6AMPCqVb+UpeFGVGPx10i2bl+aqedAokG5S6Wyk6uZXfaj5XZsV6aY3K4nfN2pqrGAyGfR40PaocP8ONBy/JeaWH6elbKjIo112IRKLA5vWmjWMNCP9nVgIBBGTakL4+rKTHnNZGg+qxw4cAAnnnhiwuMnnngiDhw4oMtGFSttOnVPViJJUspUVltf6onnnML37JhrUAYArw4lxcnoUUw81/PursJkZYcrioB6xVBpINa1GkuhEqUyKfenGQQKAFXR0nOuXnQNqFV2Ys3sWsl24nk8wnNkoGerS5Wyk3kYKG8DoCXgLdcwfypXeM8kLR5BNWm2w1E/i5E9djg8s3DYpDTWmuh16fjRNYarVunQfFY54ogj8Pjjjyc8/thjj2H8+PG6bFSxIvoo6GxAE8HOztaYx9tVpM1kz05hNpgSyk4WgxKzpVSHAY/J6NJYiaUWpWcnl4ICtSjvGNWqX7wSC9A3jWWzSaKpW6pgJ2MaK07ZUTMEFIj4VHijwmxSnmLieY7+Lb6d/YFQxqnj2dKhJthxZu61Iys7+npi9KIvizQWVwDTBWPc4mBk92QON0Cbpezwm3B+ncoXmr9FP/vZz/DVr34Vq1atEp6dt99+G6+++mrSIIiIEA4zhbKj7wHNK7LW7WqHPyhL9ZnmYgHy3ayWqhkrkcvsoGwp0aGkOBlyjx19DYpcgQiFGfr8IcN7EimNj2oVQx6IOGwSbDr3SnE5bAj6QxmDnVQXWN6bpj8QwkAgpNqzA0SOy4GAPztlR6fu4OVuh2gk19kfQH25vjcG/mBYvL9UpeeAsotyugaP2Xl2AHPStDmlsdIalI25EU4G76LMX9NIQmGG93dG/Dr5DnY030LNnz8fa9aswZAhQ/DMM8/gmWeewZAhQ7B27VpcfPHFRmxjUdDRHwCfA5cu+MiGI+rLUFPqwkAgjI/2dcqv2TcIqrFM7qCsfC29Dco9Bik7XqddqBtmXBDa+5TBjjrFUFRiGTCZWx4ZkfwY59uY6gJW7naIZnVd/QHV1ViArDhmU5HVq1May2aThLqTTfsBxljaPkXK9Fi6lglq0ljZeHb49yWX6fJqCIXl/aDFR6XKs6Nzd/10iGosE5SdTw50occXRLnHgYlD82dOBrJQdoBI5+KHHnpI720panj35EqvU/cTuiRJmDm6Gi9vOoi1O9owY1Q1BgIh4SlJF1wVvGfH5NlYkdcyxqDML6JquidrQZIiF7u2Xj+6MrSs1wOlPK5WMeSBiBGT63laLFX6RIyLSPHakhQpB27r9aOjP6DaswMoGgtmcSGW/Wi5Hw9VXic6+gKaTcqMMVzwx7fQ5w/hucUnJ/2e8WCnwuNI28FYzcgIreMiAIVnx+A0ljJtnU01VlrPjlB2TAh2oteD7oFIZZyWlKFWuF9n5ugaw7tbZ0LzmeXf//43Xn755YTHX375Zbz44ou6bFQxYnQrcF6CvnZHxLfDT2p2myQMqsko+GqsvCg7BhmUfeovolqRy1+N9zUo01h9AXWKhtZmclrINDIik2cHUJSf9wWEOpLJswMoy8+zUHbEqIjcj23updFqUu7qD+LjfV34/FAvnvpgb9JlOkWPnfT7Q558nrn03K3hhtCsYaD8HClJ2SlPakvPjabC4xRKL5+faBT8epTvFBaQRbBzww03IJREDmaM4YYbbtBlo4oRo/w6HH4wvb+zHSGFP6i6JH3fhkLvsyM8O3lpKqjvyVXrqAgtVIjJ58ansdoU/Tv6/erSWGJUhJFprCyrsQD5Qt7R51eksVQoO+7s/V16KjsVWZafK1OSD769E+FwosFdHgKa/tymyrPDg50U3ayToSaY0INehT9QS1UR9+ClMyhzZafOBGXHZpOE2n/YQN9OOMwsY04Gsgh2tm7dmjCJHAAmTJgQM+eKiMWI7slKJg6tQLnbgW5fEJ8c6FLMxUp/t1Xonp2+PCo7ehuURbCjs0EZMLexYJvigqq19FzPsnOO6KKcYmTEgOizo0LZUaSx1Hh2+PcrK8+OTx/PDiCPjNGq7HQolv/8cC9WfnYocZkMoyI4qqqxxMgQ7QZgo5Udfq7RYk4GMqexYopXTFB2APmm28iKrG2HetDeF4DXacfRTZWGvY5aNJ9ZKisr8fnnnyc8vm3bNpSWluqyUcWI6J5s0MFst0k4bnRkBs7aHW0xyk46PK7IIdAfCJlSlqw3wrOTh6aCeis73HNgRBpLKDsmGJRjlB2Vx5WRBmV3GmWHMaYYBJr6tUWw0CcblNXMLxPKThaeHZGi1UG1rPRG1tGhMdhRKjsA8MDbiYOgRffkTGksNX12slB2RDBh8LHdn2VPL2WfnVASZaxrIIBg9HEzmgoCSpOyccoO9+scO6rKkPS0VjRvwRe/+EVcd9112L59u3hs27Zt+P73v48LL7xQ140rJnhO1sg+CscL306bqonngHznyVj6Oy4rwhjLi2dH3K3rXP1hhmfHjGGgbQrPTijMYnropMJQg3KaYMcfCosRHe40np1KxcgIngqs8Gb+nETlXhaBMQ+QynTw7PAUk9bPn1d0jhlSCpsEvLn1MD472B23jEplx8FT5mqUHS2enWiayHBlJ/0MtVQo59
wlOw54KqnC4zAtKKgV5efGKTsihTW6NsOS5qB5z952220oLS3FhAkTMGbMGIwZMwYTJ05EbW0tbr/9diO2sShoM2AuVjxyc8E2cRBnKnNXmjILzbfjC4ZFOb+Zg0D53br+1Vj6DwHlmDkMtC1ODVCTIs1mJpJa5NLzxIvsgMJTlO4ixvvHHO72i/ejSdnJ4ljpzWLCdirkkRHaLm48kJk0tAJfmNQAIOLdUSJ7dnJXdvwW9uxk263d45QnsyfbRjPLzjny5HNjgh3GmKXMyUAWpeeVlZV455138Morr2Djxo1i6vmpp55qxPYVDWaUFh4zrBIepw1tvX68tzMSVafrsQNE0gZOu4RAiKE/EEKVYVunP8o+N1rvtnIhl7v1dHSLkubC9ez0+YMJd+79/lDGu35fFnf0aklXes5TWDYJYip3MviFfHdbn3hMjQKXS08m2XyvQzVWliMj+EDXqhInLpzahJc3HcRTH+zFD+ceJW6k1IyKABTVWCmUnXA4u2GwZnt2simGKPNEhpUmS7Xxa4OZwY7oomyQsrO7rQ8Hu3xw2iVMH1llyGtoJatbBkmScPbZZ+Pss8/We3uKFqOrsYDIHeyxI6vxzvZWkS9V08DQ44xMIy608nOlrGxmDwejOygbouyYNAyUK4ouhw0uuw09vqAqZUdUY5mcxlKWnaersOGenT3tkWCnxGVX5S8qzeFY0XPIbaXCYK2Fzj7Z+3f8mBpMbqrApv1d+L/3duM7px8Rs8503ZOBzH12lMpbupRiPEoDcDbT5dWSbRoLkCezJ+sFZGbZOUd0UTbIs8OvP1OHV2kegGsUqs8sq1evxvPPPx/z2N///neMGTMG9fX1uOqqq+DzmTMyvhBpFW57Y6N3LhlyI1ymaiygcCuyRIdZE+diAUaWnhtnUDbLs6MM6rX0cDI2jWWPeQ0louw8wwmZKyP7O/oBqP+MRAflXJQdnZoKArkpO5Ik4cqTxgAA/v7OLgSiwUmnWmUnQxpLGQRlo+yEwiytHyhXsjUoA0plNfE4MHNUBMfoLspWKjnnqD6ifv7zn2PTpk3i948++ggLFy7EnDlzcMMNN+C5557D8uXLDdnIQicUZqoNw7nCmwtyMlVjAYr5WIUW7Pj060OiBRHs6G1Q1tCZVyuyZ8fgYEehBPD9pM6zEzUoG5jGSurZ4WXnGYIdHixwj5iasnMgN2WnR8chtyKNlWWfHa5snT91KIaUudHcNYAXP24GIPuAKtX22Uml7EQflzKkFOMpcdnBxZx086dyRZ6LlUUaK81k9kN58OzwDIMWg/Lj7+3Bcxv3q1qWBzuzxlrDnAxoCHY2bNiAs846S/z+6KOPYtasWfjLX/6CpUuX4s4776RBoClo7/ODsciXuDqD1Jsr00dWxZwo1ARXQtlR2QDOKuSjxw4g5+x7/fpNEQ+FmWhaZkQaK92dpZ60KSR5LQ0rhVcjT2msTIbYqribBtXKTg4qYJ+OxwOvxursD2g6Zrlqw89bbocdl50wEgDwwFs7ostE3pvqPjsp1BefIuDVkoqSJCltMKEXvBt4LspOMl9Rq4mjIjj8tdT22WnuHMAP//khvvvoemza35l22QOd/djd1gebBMwYVZ3ztuqF6jNLe3s7GhoaxO8rV67EvHnzxO8zZ87Enj179N26IoEfUFVeJxwG3Lkq8TjtmDq8SvyuRtkp1PlYvXmYiwXIJ7uwjuX6ypOgkR2UDVd2FFWH4rjSksYyuc+O2jRWfKWRmlERQG7+Ltmzo9+4iKAiqFaDrOzI73fBrFFw2W3YsKcDH+xuF+MiMnt20qexcklllptgUs4tjZX6+8dTSUa2JYmH3wT3+IKqbka2tfQAiLQoufXFT9Muy1Wdo4dVGnLjli2qj6qGhgbs2BGJ5P1+Pz744AOccMIJ4vnu7m44ncaqFoWKmUPegNg8qRqDcqF6dvpzuNPKBWXaTC+TMj9Juxw2QwbzmTUuok2RruXHVZ8qZccEg3KSNJZPxVwsIDG4UVsxl+3U82AoLAJpPTw7HqdN7Actvp2OXu7Zkc8jdeVuXDitCQDwp9e3CXN5JmXHk6GDsmgomMXxX2ZCY8Fc0ubplCdRjVVunrJT4XGIDIAadWdHa6/4/5tbD+PNrYmdtDlrRH8d6/h1AA3BzrnnnosbbrgBb775JpYtW4aSkhKccsop4vkPP/wQ48aNM2QjCx0zeuwo4cGOy25T1WpeeHYKrBqrNw/dk4FIt2p+l5qN8TQZwq9j0J0Qb4DXHwgJY6kR8DRWTYlL03FlqEHZni6NxT076V83fqCumrlYQPZTz5UBoh6eHT65HVDv2wmEwuiOHt/xyta3ThoNAPjvJy0AAIdNynjTodazk83IEB5MdBup7ORwcyV6ASVNYxlfqRuPJElyrx0Vvp2dhyPBDv+eLP/3p0nnpAHWNCcDGoKdX/ziF3A4HDjttNPwl7/8BX/5y1/gcskfzgMPPECl6CkQ3ZNNctufMLYW00ZU4ZJjh6nKfReqspMvzw6gaCyo0z7j8rYRKSwg1vdhZPm5ciyKluPKyHERrjQXWbVpLCBW3VCt7GTZk4kHRw6bpFtqr0rRBVoNSgUoXrWZ3FSJWYqLGa/WSgcPYlKlTbjyllWw48k8bDNXZINy9p6d+O/eQCAkVF0zlR1A0UVZRfk5D3YWnX4Eyt0ObD7QhX9t3Jew3OEen0h5zbSYsqP6zDpkyBCsWrUKnZ2dKCsrgz1uUNsTTzyBsrIy3TewGGg1WdnxOO14ZtFJmpYHCi/YEbKyyaXngHJkhD4n124DR0UAgCOq8vX6Q+geCBh2LHKPR0zpuao0Vn7GRcgGZTXBjhO7IzetqkZFAHJQPBAIIxRmqvtB9Sj8Onr1jeEBi9r2A7x7coXHkdRreOXJY0TKQo2HKVOfHW5ctqpnJ9sOyoBi++KCHZ7Cctlthqm6qRDl5yqUnR3RYGfGqGpcc8Y43PbSFtz+8meYd/TQmBTw+9Fmtkc1lKuyUJhJVoNA4wMdAKipqYlReggZoyee54qXDwMtsDSW6GhqchoLUA4DzbzPNuzpwJ9Xbk8p+wLySdBIQ1+5Cb4d5QBabxYGZbOnnquZeM5RqhtalR1AW0UWX1bP40EeGaE22IktO49nzsQGjKjxRpZRE+zwDsqpDMrRgDeXNJZVDcplKaqxlA0FjWqGmAq1k8+DobDoHD56SCmuPGkMGis82NfRj3+s3hWz7BqLprCALIIdQjui0ZqJTaO0oKVE2Er0+rM3DOaKliqbm5/dhOUvfoqXNjWnXEaei2WcyZ+rEUZWZCkbpGnp32RkNZas7CRuh0hjuTK/rvKir9az43bYhJqjxcwuq5Y6BjsaR0bwhoKp2mXYbXKTwRE1JRnXJzw7qUrPc1B2zJiPxQNQr1P7Z1LuTl6NxVNIZvbY4fAuyoczpLH2dfQjGGZwO2xorPDA47Rj6dlHAgDuem1rzLw1q/p1AAp2TEE2oFlU2SnQNFafLz8dlAFt/VP2Ru+K3v28NeUyvI282otoNghlx6BgJxAKiz4+NaVuIW+rucjna1yEq
MZSoewo1Qu1TQUlSTbuakl5yqqlfse21pERmZQdALh89mj8acGx+PG5EzOuT+24iKyqsYSyY3xTwWzON6kMyoe783cjXKtyPtbn0RTW6NpS2KKB+/xjh+OohnJ0DQTxpze2A4gE0ZsPdAFAjJ/LKlCwYwI8ejfLs6MVj4a2/laiEJQdXzAkPFv8ricZIo1lYLBTYXBjQe7XkaTIhVVLEO0zsBrLnab0fEBl6TkQ20dGrWcHyK6LshHHtrKxoBo6+jLPvLLZJJx7zFA0VHgyri/juIhcPDsmlJ7n4tlJVXrOVZV83AjXqpx8zs3JY4aUisfsNgk3nDsBALDinZ3Y296HdbvawFhkuXoVx4PZULBjAqJplMXTWAWn7ORpNpbyNTPdrbd0yRLxp83dMZKvki5TPTvG3P0q/Tp2mwRv1KOhzqBsYDVWmtJzOY1ljGcHUKqA6r9fRqiWldEATW3pebti9IceyJ6dcNIuzr5cqrFM9OxkMy4i1SBeUalbng9lh1djqQt2RiuCHQA4/cg6zB5bC38wjN/95zPL9tfhqPrUnn32WdUrvPDCC7PemGIkGAqLOySrKjuF6tnpy6uyo+4C1tI9EPP7ezvb8YVJDQnL9YhqLDM8O8ZcEESPnehxrq3PTr6qsdRfYGM9OxqCnSwaCxqi7JRoVHZUDvhUC09PMRZJW7ocsYZcnlK0omcnGAqLgLwkm6nnHrlVRTAUFtVtoqFgHpQduc9Oes/OjtZIGn7MkFhfliRJWHbuBFz4x7fx9IZ9aIyqOVb06wAqg52LLrpI1cokSUIoVFgXTKPhHWVtUvrcdz7RUiJsJfpEU8F8eHbUpSaaO2NPJGt3tCYPdkxJYxnr2Wnriw921PciMsOgnMwroiWNpTTqamkRUJJFY8FeQ5QdbX12OoSyo1ewI3+2vmAoIagR89GyOAaMVnaUTR6z6bOjVGx7fSFUlkTeo7Iay2x4piFTNdaOw5G+OaNrSxOemzK8ChdObcKzG/fjQGfkxs6qwY6qoyocDqv6oUAnkXhp34pomWFkJfidcjYnn1wpVWlQPtgVOQHwE30q3w6f1mxkr43yFFK6XohO4dGgXkt6lBuUTS89jwZA6poKRvafXUW3YCWlWTQW7DWgrYLmaqzoqAi9+qUoP9uBJBVZ3LOTaShrMtIN2tQDHqgqu6drITIGJvJ3ypsNoezkpRor8rn2+UMpz/3+YBj72vsBAGPqEoMdALh+7lFi9ERTpQfDq70GbG3ukGfHYPIZuatFvigV2tTz/AwCBWTVojfD3ToPds6aWA8A+Hh/V9ITshgXYaSyE01jGeXZ4cd6TVlcsJPvcRHp0lh+9crO0Epv9F+Ppp4ovHy8T0s1lhGl51r77OicxpIkKa1JWVZ2slFOjO2gLLq1O7Nv8pgsIDucx+tDmdshvhupuijvbutDmEUC9roUAdmImhJ8c/ZoAMBJRwwxvV+QWrL6JvX29mLlypXYvXs3/P5YCey73/2uLhtWLJjdPTkbtPRDsRJ6ToXWCk8v8Hk5qWiOBjvTRlThw72d2Nvej3W72nHakXUxy/GSVFMMygalsZTdkwFFs8p8G5TTBTtBHuxkft2mKi8euOI4VZVHSmRlR0s1lv6l57x0vnsgqKqbc4fOBmUgou74guGkKUXRWDILZSfd7Ck9yGVUBKfM7cDhHr8IdsJhhrZokJEqkDASSZJQW+rCgc4BtPb4Mbw6sVfSDoU5OV0Q86NzJuCYYZU4Ne68ZiU0n1nXr1+Pc889F319fejt7UVNTQ0OHz6MkpIS1NfXU7ATR6vJE8+zQcsduFXQeyq0VuRxEeqUnYYKD2aNqcXe9r1Yu6M1MdgxsfTcqDRWa2/sxVFLetRIZUev0nMAOHNCot8qE7K/K7/KjnKkQ1d/IGN6qkM0FdQx2HHagYFg0saCXO3JxbPjD4bhC4ay6tWTDh6w53JjJaeRI/u1vc8P3lQ9X6MVassiwU4q306qSqx4XA4bLpo+TPft0xPNR9WSJUtwwQUXoL29HV6vF++++y527dqFGTNm4PbbbzdiGwsa0T3ZwspOIc7G0nsqtFZ46iyzZycS7DZWeESjrWS+HS6/a6ny0YrRyk5bnCSfVZ8dQ0rPI9uRrvRcbbCTDXKbgvwqO067TQQFmXw7A4GQ2DeVOhmUgfS9dnJSduIMwHrTl0PZOUdMZo9+1/nNQVWJ0xBFUw2ii3KKiqwdrZFgZ2yGYKcQ0LyHN2zYgO9///uw2Wyw2+3w+XwYMWIEbrvtNvz4xz82YhsLmsMW754MFGY1lhFTobWgpvScMRaj7PAqhY17OmNShsFQWOx7I9NYlUaXnselbLmioSY9GgjlybPDZ2NlcYFVS1bKjkFtFbj/JlOww1Udu03Stau3O01lXC4Br9I0boRvp9+fe8o8fj5WPs3JnCEZ5mPtVHRPLnQ0H1VOpxM2W+TP6uvrsXv3bgCRAaF79uzRd+uKAJ6TrSkAg7I/GJnMXAj0Kk4++TDEqSk97/YFxfONlR6Mqi1Bfbkb/lAYG/Z0iOWUhkUjzdbKaqxkTd1yJaH0PHpcBUJMBDOpMHQQaHSdwTBLGMaqNY2VDdk0Few1yMOldmQEL0+v8jp1/X6lGxkhKzvZfRZCOTFgZISYVZZTGiv2ZkO+Ec7ftaEmQxflHSrTWIWA5jPL9OnT8d577wEATjvtNNx00014+OGHcd111+Hoo4/WfQMLHdEh08JpLGXZbaGYlEWPnTxUYgFQNe/oYLTvRKXXCU+0iuP4JKksfvJzO2yGKBscniILhZmmC68aGGNoj1N2PIrhmplUQ78Jyo7ydTj8eFdTep4t2Y2LiAbzOqdo1So7vOxczxQWoOiinOR44AGQO0ultszAkRF9enh24kZGtFpA2RFdlJPMx+r3h0TvnEGZxvrVr36FoUOHAgBuueUWVFdX45prrsGhQ4fw5z//WfcNLHTipX0rorybLpRUVp8OsnIu8CArnfmW+3UaKuSTWTLfjhndk4FIqsYRrcDR27fT1R9EMKqa8GPdZbeBF/yk66IcCjOhKBo5LgKIVRRCYSb6+xiq7KgcLaJEbpipbzDPewV1phhbwjGiEgtIn8bKxbMDyMGEln5GapHTWDl4duLSWOJGOI+qvzwfK9Gzs6stoupUep15M1DrieZP7rjjjhP/r6+vx0svvaTrBhUbhwugGstmk+Bx2jAQCBdMRVY+R0VEXlduFMcYSyr1Nyv8Opzjx9QCANbtakcgFIbTbhPKjpE9doBIqWmF14m2Xj+6B4IYWqnfunkKq8ztEKkKSZLgddrR6w+lDaKVXhojlB3e8Cz+tZQqppGenZyUHZ2DedWenejzVTr12OGkS2PlUo0FyDcgRnjS9Cg9jy8QsMK1oSaNZ0dtJVahoPmoOvPMM9HR0ZHweFdXF84880w9tqlo8AfDYsCjlauxgMKbj2XUxUAt/HXDLPmJG4gtO+eMry9DVYkT/YEQPtrXCQDoiXoMjDQnc3hApXdjQe5Nqy6NvTiqMb/HBDsGKDuSJMkmZUUaS7lNHp1LlZWUaOygHAoz
YZzWO02rtrEg75mk94gbHlQmO8/k2n7AyJER/CYwm7lYnPjJ54eFspPPNBafj5UY7HzOp53XJvbfKUQ0H1VvvPFGQiNBABgYGMCbb76py0YVC/yEYbdJunUhNYpCKz/Pv2dHft1Ud+w82GlUBDs2m4SZo2NTWWYpO0Dq6cu5Ironx1UdimAnjaqhDECUKoyeuJNMPucXXJfDBpuBo1xEmwKVJdHKqi3dlR2VIyPkHjtmKjvhmGW0YqRnR4+bq/gOyjx1lM/u+rxKuLXXl1C0wJWdMUPKTN8uI1B9dv3www/F/zdv3ozm5mbxeygUwksvvYRhw6zdVMhs+AWgusRl6MlUDwqtsWC+lR0+I8cXDKPXF0zqyWqOmvsaKmM77s4aU4NXNh/E2h1tuPq0cXJDQTOVHZ09O/Hdkzlqeu0ozclGVda5HDbAFx/sRMvODTSFA9qVnd4c5zClQ3U1Vp/cA0ZP1PTZyVbZKTdQ2RFp8xy+o4nVWNygnMdgJ/raA4Ew+vyhmJvHnYcj085HDykOZUf1Jzdt2jRIkgRJkpKmq7xeL+666y5dN67Q4ZF7Pg9mtXBlZyBFSsZqiLlYefLsAJE7dl/Qn1rZ6Y4alMtj1Y5ZUd/OezvbEAozcXI2snsyR558rrOy05vc0KomiDZy4jknWa8dUYllcMAsN6AMpfR3KVE2FNQ7+KvyRj6fjNVYUWVH7zSWXI2VTtnJrRrLCM+OSGPlNC4iOr8rwaCcvzRWicsubtraev0xwY5IYxWJZ0f12XXHjh1gjGHs2LFYu3Yt6urkdvculwv19fWwZzHArZgphEosjpp0g5UQc7Hy0D2ZU+Kyo603dbM4XnreGKfsTBxajjK3A90DQXza3GVK92SOYZ6dFAMN1aRHjRwVwZE9O/J2mNFjB5AvkKEwgy8Yzvh6RqZohUE5g2en04BREYC6NFb2np3YYEJPhEFZB89O90AQfX65B1c+DcqSJGFImRv7OvpxuMeHETUl0W0MCOWpWAzKqr9No0aNAgCEw4Vx528F5Im21q3E4hSaQdkKyk66ZnGhMMOhHl56HhvsOOw2zBhVjZWfHcLaHW1iVo4ZaawKrzGenVSBvSrPjhnKjj2x5FlOYxkd7MT6uzIFO0amaKtUenba85LGCsUsoxUjPTv9OlR/ymmsgFB13A6briNBsqGm1IV9Hf0xFVm7WiMprCFlLlNuwswgq6Nq+/btuPbaazFnzhzMmTMH3/3ud7F9+3a9t63g4RUqVq/EAgrPoGwNZSd1SXFrj09Mlk4mUyubC3abmMYyyrMT3z2ZoyaINrKhICdZGkvMxTL4YqP03qgZGcGXMVLZ4R2SUyGnscw3KFvSsxPQz6DsC4ZFs74hZe68dIBXkqwia0cRjYngaD6qXn75ZUyaNAlr167FlClTMGXKFKxZswaTJ0/GK6+8YsQ2FixyhYr1g51CS2Pxjqb59exwZSfx5Mp77NSVuWFPYk5XNhcshmosoezEe3Y0lJ6bHeyINJbBBmUg1reTCT1GE6SCV2MNBMIpA1DGGDr7jSk9T+XZYYyJoDfraiwxLsKAYEeHz0QZvO6KDti0gp8z2ciIYhoTwdF8dr3hhhuwZMkS3HrrrQmP/+hHP8IXvvAF3Tau0OEHTz5LC9XijZ6ECkXZ6fPltxoLALzOaMfWJCXFohKrInkK85jhlXA7bGjt9ePDvR0ACrvPjgjsy1IZlFOnv4WyY0IaS1nmbpZnB5D9XWq6KAtlx4BAvszlgE2K9Ifq6g8kfe+9/pDoLK1/6XnyNFYgxMArn7P27Ig0lv6zsfRoYuq02+B12tEfCGFnNNixgsVhiBgZIXdR3llk5mQgC2Xnk08+wcKFCxMev/LKK7F582ZdNqpYYIzBYZMKIo1VaJ6d3jx3UAbSKzuiEivOr8NxO+yYPrIqsmx0rIQpyo7w7Jhbes7TAMngaoszT8qOkXOxOFq6KPcYaFC2KXp+pfLt8BlnLodN932TKo2lDEKz9uwYWnoeWWeulXs8IONl3VZSdpSenR2txRfsaP421dXVYcOGDRg/fnzM4xs2bEB9fb1uG1YM3H/5TDDGUAiDxD2FlsYyaFCiFtIZlA92JnZPjuf4MbV493N5RpbRs7EAY0rPBwIhsQ/iZ+jwi0O62VhiJpKByo47abATNSgbOCqCo2U+FlctSw06tiu9TrT3BVL22ulUjIrQ20+SqoOycjBotgpfuZEGZR0GgQIRX9Ghbp9IE1lB2eE3KIeTpbGKyLOjOtj5+c9/jh/84Af49re/jauuugqff/45TjzxRADA22+/jV//+tdYunSpYRtaqEiSBIOawuqKmuZvVsKoQYla4KpSsmZxontyZepg54QxNbhT8XuhprF4utZpl4RJlKOq9DwkdzI2imTjIsxMY2lRdoxWLStLXEBrX8ry83aDhoACmZUdp13KugFrmRgEGhLFAXrgD4ZFWi/nYCf6/ZM9OxYIdsq4shNRmDv6/KKDdrE0FAQ0BDs/+9nPcPXVV+PGG29EeXk5fvvb32LZsmUAgKamJvz0pz/Fd7/7XcM2lDCWQgt28t1BGYAoGU2mhiUbAhrP9JHVcNgkMS3cjGCn0oDS83ZF2Xm8EiAfV6k9O4Fg5P0bGuwkGxcRNNezA6jrotynaCpoBJnSWB0GVWIBqaeec8NytuZkILaasdcf1K1kWvn91iuNxQNaK6SxxMiIqO+OqzoNFe682gT0RvU74XMzJEnCkiVLsGTJEnR3dwMAysvLjdk6wjREuqFAgp18z8YCAK8rtUFZHgKa+s7N67JjyvBKfLC7A4C5TQX7AyExdT1XWkWwk/heS1SkR31mGJSTXGS5adoUZUfDfCxRjWXQsV2VYWSEUaMiAGU1Vux+0KP9gNthh8tugz8URs+AfsEO95s5bFLOx2j8DU1tku+M2SirsRhjwjxdTH4dQKNBOf6urby8nAKdIsFTQLOxGGMxLfXzRVqDctR03JhG2QEivp349RmJ8mSbSd0JhxlCKgxnXP6uKU28uKgJovNmUBbKjgmenQJSdtoN6p4MyMqNPz6NleOoCE6ZR3+TsuierMP4jnhf3pByCyg7UXXJHwyjxxfEjkPFGexounU48sgjM37YbW1taZ8nrEkhpbF8wbAwfRt196uGVE0FBwIhcSGpzxDszBpTg3tXbofXaYfDQGWD47BHOrb2+kPo6g+k7QH1jQfWYMehXry85NS05ulUE88BOYhO10zPnA7K0YtsHkvPAXU3E4Z7dsTIiOSNBTsMmosFpEljBfXxbZW5HWjr9euaptVjLhbHispOicshSuLbev3YEe2eXEzmZEBjsPOzn/0MlZWVRm0LkUfUeCusgrKixYyy4VTI1VixJ1aewvI67ajIUE5+wthaTBtRhYlDzVNIK7xO9PpDaS8IrT0+vL2tFQDw/s52nDEhdaVlqrJzQN1xle+mgmYcQ+nM7PH0imosg9JYGUZGmJLGiuuzk+sQUI4R5ed69NjhKNtLSJL+fYyypbbMhb3t/Tjc4xc9doqpoSCgMdi59NJLqby8SFFTImwVlEP
59Kq4yAaRmojzYTQrBoBmUkK9LjueWXSSMRuYgnKPAwc604+M+Hh/l/j/+t3pg522FBPPAXVprEBInwtdOpIHO+aVnouUpyrPjrGl5xUZPDtyNZYRBuVoNVYgXtnRJ+A1Yj6W6LGjQ1CsDHZqSlymqLlqqC2NBDutPXJZ/NgiC3ZU7+l8z+8gjKWQZmP16Sgr5wK/847fZ7wSq748/xJ1MuSREWmCnX2d4v/r93SkXV+q7smAsoNyfmdjJe+zY2YaS72yo6eSkIyqTNVY0ccrvealsWTPTu59bACgx6dfawU9zzd8Mjtgrc76vN/P1pYe9PiCkCSICejFguqzC6/G0pNVq1bhggsuQFNTEyRJwjPPPJNy2auvvhqSJOEPf/hDzONtbW1YsGABKioqUFVVhYULF6Knp0f3bS12Csmz02uBhoKAvM/iG8W1cHNymh47+UTutZP6wvvRXjnY2bC7A+E0RuW0aSwts7FMHhfRb2afHWFmz/z9kgeBGmxQTtFnp0MYlI0rPfeHwjHHlE+nY6BMTBY3II2lQ1pRWR5vBb8Oh3v33t8Z8dw2VXpN+V6YieojKxwO657C6u3txdSpU3H33XenXe7pp5/Gu+++i6ampoTnFixYgE2bNuGVV17B888/j1WrVuGqq67SdTsHAwWVxrJAQ0Eg9XBHruxkqsTKFzyNkS6N9ZFC2en2BbH9UOobiNZ0aSwVQTS/0OlRBp+K9GksE5UdFV6SXoOPb248Tl2NFf08DRhzo9zXSnVHKDs5phSN8Oz085srndNYQyyk/HKVad2udgDFV4kFZDEuQk/mzZuHefPmpV1m3759uPbaa/Hyyy/jvPPOi3nuk08+wUsvvYT33nsPxx13HADgrrvuwrnnnovbb789aXBEJKcglZ18p7EUBmXGmEj1ijSWRYMdoeykuPtt7/VjX0c/AGDS0ApsPtCF9bs7ML4huYm6Lc3AW35c+YPhlF1tzTQoKy+wvNeLKVPPVXZQDoWZbqMJUlGp8Owoj1sg0m5AOS5Cb5S+LF8wJG6yRDWWTsqOvp4d/T4PZYdxK81M5NvCzwnFGOxYwx2VgnA4jG984xu4/vrrMXny5ITnV69ejaqqKhHoAMCcOXNgs9mwZs2alOv1+Xzo6uqK+Rns8ItSMMyEYdSqyDJ/fpUdfqIOs9iLaIvVlZ0Mnh2u6oyuLcGpR9YBANbvaU+6bDAUFmmPZGXsyo6zqQLpgAmenXRprFy74qpBzMbK4NlRVvYZXY0VCjNR5s7pGgiI6eNGlJ477DYR8CZTdnI9BsoNrMbS4zhRprHqrKTsxKXUiq0SC7B4sPPrX/8aDocj5RiK5ubmhNSaw+FATU0NmpubU653+fLlqKysFD8jRozQdbsLEY9LPhSsru6IDrN5VnaUBlJleqJZRffkfMJ75qTy7Hy8PxLsHD2sUkxmXx/t8hwPN7NKUnIlQHknn8qkbG7pubwNeZmNlaEai19Y7TbJsOo0j9Mu9kdHXK8dHriWuuyGfR7CpKyoyPLpZFDmaaxuXYMd/ZRkZb8qKyk78cUFY4poJhbHssHOunXrcMcdd2DFihW6V4ItW7YMnZ2d4mfPnj26rr8Qcdlt4BkGq/t25A6z+VV27DZJlC3zixRjTHRPTjcXK59UeLmJM7mywyuxjhlWiekjqgAAWw52J71b5imsSq8zaRmtJElCNUxVfs7VFiOnnqf17OR4gVWD2g7KPGgu0aFbbzpSVWS1ix47xl2I5Yos+XjQK+AtiwYTxqSxdDAoK9NYFhgCyhkSr+wUWUNBwMLBzptvvomWlhaMHDkSDocDDocDu3btwve//32MHj0aANDY2IiWlpaYvwsGg2hra0NjY2PKdbvdblRUVMT8DHaUF6WCUXbyXI0FJHZR7ugLiBN3vdWVnQxprKOHVaK+woNhVV4wBnyYpARd7p6c+uJYkqEiSx4XYdzFPX7qOWOyN0apahpFieg3FE47goMfR0YH8qkqsowcAspJNvlct3ERhhiUjemgbIUhoBylsmO3SUVXdg5YONj5xje+gQ8//BAbNmwQP01NTbj++uvx8ssvAwBmz56Njo4OrFu3Tvzda6+9hnA4jFmzZuVr0wsWNWXCVoBvX76VHSDxjp2nsGpKXTlL8kZRkaY8t6PPjz1tEXPy0U2RbunTeCorSbCTruyck2numlx6btz+csdNPVdeaM0cBAqk/34JZcfgQD5VF+WOfuO6J3OSdVHm/8812Cm3uEHZbpPQUOGGTQKGVXtzXp9eKL+/I6q9hlZG5ou8Xi16enqwbds28fuOHTuwYcMG1NTUYOTIkaitrY1Z3ul0orGxEUcddRQAYOLEiTjnnHPw7W9/G/feey8CgQAWL16MSy+9lCqxsqBQhoHKUr91gh2+z+Rp59ZMYQHplZ2P90XM+iNrSlAZveBNH1GFFz48kNS3k67snOMVVWvJjyufGQbluDSW0i9iRhrL7YikicMM6PMFE2YkcUxXduLTWL3GzcXiJPPsWFnZ6RNGdn0+k/u/OROtvT7Ul1vnHOFx2sXMvGI0JwN5Dnbef/99nHHGGeL3pUuXAgAuv/xyrFixQtU6Hn74YSxevBhnnXUWbDYb5s+fjzvvvNOIzS16CiWNZZUOypFtiO2fctDi5mQAqPSmVna4OfmYYfIMvOkjqwEAG/a0J5Qqt/WkLjvnZPLsBPIwG4sf43abBKfd+O7wkiSh1OVAty+YUAGlpNfghoIc3h05fmREh4GjIjjJ0lh6j4tI1x1cK/06t7o4Zrg150vWlrnR29ZXlH4dIM/Bzumnn66pM/POnTsTHqupqcEjjzyi41YNXtTMMbICZkn9auAXJX7xbO6Mdk8uAGWneyCYELwo/TqcyU0VcNolHO7xY297f0w+n6ex0nl2MgXRYlyEGQbl6GsNKHrsmDUKp8RtR7cvmDbFIuZi5UnZ6RA9dvJjUNZvXETisZ0t3CNoRouCfFJT6sLutj6MrSvOYKf4EnNE1shpLKv32bGOZ8fr5MpONI3Vbf00Fu+zEwqzhNTSxyLYkU37Hqcdk6L+nQ92x/bb4WmsmjSt7z0ulZ4dM/rsRF9rIGhe2TmnqSri0fj8cOpu1LL53pxgp6MvvhrLeIOyxymbtTl6KzthFrv+XLCSR9BILprWhNG1JTjjqOIc9k3BDiEolDSWVTooA8qZR9E0Vqf1gx2P0wZHtM+A0rfT2R/ArtY+ALI5mcNL0ON9O229ESWrpjT1xbEkk7JjxmysBGXHvFERHL5PNykmyscjt1Uwx6DclTKNZa6y49PJs+N12kULjW6dhoHq2WfHylxx0hi8cf0ZRVmJBVCwQygolGBHzMbKcwdlILH0nCs7jZXW9exIkiTPx1I0FtwUVXWGV3sT5iJNT1GRJZeep36/mdKjZkw9T/Ds+LmyY94pkPuglENW4+k1eOI5Rx4ZkbypoDnVWEplJzouIsdjQJIk2aSsU0WWnh2UifxBwQ4hKJRhoFZSdhJKz6OeHStVWiSjIomR86N9ieZkzrFRk/Lm/Z0xQYuW0vNU1VhmGJTddnm0Rz
AUzksai/ugPt7fmdKr2OczyaCcovTcnKaCUYNykmosPY4B7knTqyJLzz47RP6gYIcQeApF2fFbR9kRw0B9IQRCYbRG0zqNldYOdpKVn38cTa8cnSTYGV7txZAyFwIhJtIwjDHRQTndhOxMiqGZpedAREnymTgqgjO+oQwuhw3dA0GRLozHdGUnzrPDmwwaW42VLo2lX+M+PZQdfzCMYLQJZIkz/+cbInso2CEEhZLGUrbUzzfcSNrnD+FQtw+MAU67hBoD74z1oCJJ+fnHSSqxOJIkYdqIiLqzPmpS7vYFEQhFLgTplB1vtENxMoMyY8xUzw4QuYCJIaAmBjtOuw0TGyOT47mKFg8/tsuMbiqYpBorEAqLmVLmjIswRtkR5ec6KDvKwayUxipsKNghBOkuSlYhFGbiJGmF6ogSl2xQ5t2T68s9sNnMKWfOlnJ3rEG1ayCAHYd7ASRPYwGJvp32qKpT4rKnVUjS9dnhwRJgbLBjt0li2rY/GFYYlM09BYpUVqpgx2Rlp3sgKMZXKFWeyiRDXfXC7UwyLiKkj0EZ0FfZ4Sqy0y4ZqjwSxkOfHiHI1PzNCljtTks0FfSH0FIADQU5XNnpil4QNkU7Jw+r8qbsmcODnQ3Riiw13ZMBufNsMsWQX+QAY9NYgBxM+YJhcYy7TVR2ADmQ5M0b4zHNs6MIZnjA2xk1K1d4HCIwNAK5g3LiuAhdlB0duygLc7LJxwmhPxTsEIJC8Ozwk4/dJulyF5grpaKHTBDNBVB2zon37CTrrxPPlOFVkCRgX0c/WroGVHVPBhTp0SSKoXIKueHBjqL8PB9pLECp7HQlNSmbpew47DYRFPBGgrzHTjr/lR6kS2PpquzoEOz0m/R5EMaT/6sFYRm8GZq/WQGlX8eszrfp4Pus1xdCc1fEnFwIwU6FoosykHxMRDxlbgeOaoh4Ttbv6UCbiu7JgCI9mjSNFbnIKdNMRqEsP89XGuvIhnK47DZ09gfEwFUlfSaNiwASuyjztKSRfh1ANiEPBJIZlHX07OiSxrKOP5DIDQp2CEEhGJSt1D0ZkCvC+vxBkcayeiUWIE+H5ikMbpidnCbYARS+nd0dohIrY7CjQtkx0q/DUXZRFtVYJk+mdzlsOCqNSVl0UDbh+JYrsiKfozwqwji/DiAHmMmVHR2rsXRoKkg9dooHCnYIQSF4dqw0FwtQGpRDwqBcGJ4dWdnp8QUzmpM50xUVWSLYyaAEpEuP6jUmQA1J01h5uIjxVFayYIcrCammoutJVVyvHTOGgALGDgIF5EBeT4OyVW6uiOyhYIcQiBlGBgY7wVBYpC6ywWonH2UHZTnYKSBlZyCATfs6wRgwtNKDIWXpAzWu7Hy4t1MoWTVqPTvJDMrRi5zTZGVnIA99djg8oNwUZ1IOK2aVmZE2qfTGVuTJc7EMTmMJZSfyXoOhsKgIM9Ozo/SLpYIHn6TsFD4U7BCCdOkGPejzB3HJPe/gpFtfEwqNVqzUPRmQDcq9/qCYi2XlieccpWcn2aTzVIyrK0O524H+QAjvft4GIH2PHSB9Z249S44zkcyzkw+T+zEKZUdpUu5TBINmNMzkyg4vOTdjVASgrMaKfAZ6V+Sp8ew8t3E/jrrxRTy9fm/adfEA3SrnGyJ7KNghBN4k04j15OfPbY4oAt0+bGtJPfk5HVaaiwXIF3LG5EqaglJ2+gOiEitTCgsAbDYJ06LqDleyMpWel6RRDAMmdE/muBRVQPlMYx3ZWAanXUJHXwB722WTMi87t0nmBGEV3lRpLHMMyjx1FVORp4PCp0bZ+fOq7WAMeO3TQ2nXRZ6d4oGCHULgNTCN9fyH+/Hoe3vE74e6fVmtx2rVEfFG0nK3wzKBWDqUTeXkMRGpy86V8AnonEyl5+lmY+XFoBxSpLFMNigDkYv9kdGqNmVzwV5FitaMSkN5GGielJ1oGosfA3abBIcOx4Hw7KQIdrYe7MbH0b5Su1t7067LzLQiYSwU7BACo9JYe9r6sOypjwAAjmh5cUuWwU6vxTw7dpsUU75cXwDmZEC+IPQHQth+KKKyqUljAcD06FBQTrqJ54B8XPmCYYTDsb1l9BwTkAllGssnSs/zcxE7JolJ2WzzfZU3EqSK0nMThoACiVPPfToHvGXR7uCpDMpPrd8n/r+rLfmMMk6faHVhjfMNkT0U7BACZdVMqqnMWgmEwvjuo+vRPRDEsSOrcNH0YQByV3asJCsrT4SFUHYOxFb7MBapIFM7qX1anLKTuc+O/FkNBGMDaZ8wKBuvZCiDHTmNlZ9TYLKKLLPN96LPTpxnx7RqrEBssOPWqedRutlY4TDDM4pgp6MvkDD5XUkfeXaKBgp2CIHyouRTUamghjv+uxXrd3eg3OPAHZdOR1M0GDjUM5DV+nqFZ8c6Jx/libAQ/DpApINuqWK71fh1ONWlLowZUhpZj01ChSf9xVmZKopXDf158Oz4g6G8prEAZUWW3Em5VzQUNCfYSSg9j46L4IqPUcSnscSoCN2Uncj+8wfDMZPVAeDdz1txoHMA5R6HCOp2p5hADyg7KFvnfENkBwU7hMCjuODokcp6Z9th3P3GNgDArZdMwYiaEtSVR1IeLV25enasIysr78QLJdgBZIMqoD6FxeG+nepSV0Z/iU0x2iPeDxYQaSzjLyZupWcnmJ/ZWJyjGsvhsElo6/Vjf7SKT9kd3Axkz44fA4GQKEyoKjVH2eGv59db2VEEi/zmiMNTWOdPaRIB+6621L4dWUm2zvmGyA4KdgiBw24Td1e5mpTbev247rENYAy4dOYInDdlKACIYOdQT66eHevcaSkVsUIoO+eUKxQZLcoOAFGRlansnMMv4PENK4WyY4ZBWZnG8kdeN18DHj1OO8ZHTcof7Y2kssyuNFSOi+B+HbtNQrnBr+9R9NlhjOnu2bHbJHG8KX07/f4QXvzoAADgkmOHYVRtNNhJo+wIgzINAi14KNghYuAnolyCHcYYrn9iI1q6fRhXV4qbLpgknquL+kKy9uwIE6d17rSUKbVC6J7M4b12AO3KztzJjRg7pBQXTG1Stbw3RUWWngMgMxFrUOZNBfN3CjwmWv3GK7LM7iFVGU3jDATCOBhVWqu8TsMrwbiyE2ZAMMx0HRXB4epOt2JkxH82N6PXH8KIGi+OG1WNkTUlANKnsagaq3igYIeIQY9hoCve2YlXP22By2HDXV87NiblVM+VnW5fViZoq1VjAbEptUJKY3Flp67crXm7Gyo8eO0Hp2PRGUeoWt6T4rgytRorqhz4FGmsfFVjAYkVWWYblMvdDjF8dVe0BNvosnMgNl3lC4YNGRlSlmRkxNPRFNbF04ZBkiSMqo0EO+nTWNFgx0I3V0R2ULBDxJDrfKxPDnRh+b8/BQD85NyJmNQU27uFjyPwBcPoymJ2jfDsWNSgXCjVWIDs2dGawsqGVCMjeBrLzGqsAX8IgVAk0M5nsMPVtI+jnZTNLj2XJNlcvvNwRN0wuqEgEJuu8gVChqh75XGNBVu6B7Dqs0gDwYuPHQ4AIthJNn2e02+xvl5E9lCwQ8SQbmijG
h5duxv+UBhnHFWHb84elfC812UXJ6JsUllWm40FyMqOJCHjbCkrMbTSCwCYMao6w5K5kyqIzscgUGWQnS/PDgBMHFoBu01Ca68fzV0D4tg2Ywgoh/fUMVPZsdkkWWVTVEwZouxEg51nN+xHmEXaJnBj8siayL/7O/sTqrY4ooMyeXYKHgp2iBhyTWPxEQJnTqhPmfuvq5BTWVrhJk4r3Wlxs/SQMrcpAy314urTxuL2L0/FFSeONvy1UnXnFuMi7MZ/niLYUfRVycdsLI7Hacf4+jIAEZNyTx4a2HF1b6cIdoxXdgBl+XnYEGVHeHaigS1PYV1y7DCxzJAyF0pcdjCGmLEdSqj0vHgonDMzYQrpJlSrobUnUtVRm0bhqIs+19KtvdeO1QaBAvK2FJI5GYhc2L40Y7gp1T9yd+7Y/k358OzwvjIuhw02m/Hps3QoU1l9os+Oecd2VTTY4RVJVV7jlR0gdvK5PAxWT4NytIuyL4jPDnZj0/4uOO0Szp8iG+olSUprUmaMKZoKWkdJJrKDgh0iBtHaP8thoIejJeXp0jl15dkpO4wxOY1lIcMgNy8WUtm52XBlh1/QOWYGO26RxooEO1ZITShNyr0+8y+svPy8tTc6BFRlK4FcUXZR5ucaPY+BcoVB+akPIqrO6UfVJ3T7FiblJDOyfMEwQtHxJlbq2E5kBwU7RAyeHIeByspO6pMmH0ugtdeO8uRjJWXnlPFDMLKmRHUZ9mAklWdHHgRqnkG5qz8ScOWz7Jwjj43oEgZlM3tIxXt0zPDsAHFprJBxaazO/oAYD3HJ9GEJy4leO0lmZClT+VY63xDZYZ3bY8IS5JLGGgiExDyaIWmGQ2ar7PTFnHysc+hObqrEqh+eke/NsDSpjO/5GBfB01j5rMTiTBpaAZvEFdFoIG+ialkZl7YyelQER1TGBUKi55ERBuXXPm1Bc9cAKjwOnDmxPmG5dGksnsJy2W0F5cUjkkOfIBFDLpPPuRTutEuo8KY+YWcb7PA7X4/TJvqDEIWBbHyP8+yY2UHZHhtwWSGN5XXZcUTUpHw4qoqWmejZiQ92jB4CyuGBpi8Yhs9AZWdfR8R4fN6UpqSeILnXTjJlx3pDh4nsoWCHiMGboq2/GlqjaanaUnfaLqz1OSo7Vio7J9SRss+OibOx4pWDfM3Fiie+e3U+PDsc86uxQoZ6djjKKiwlQtlp60M4HNvklLonFxcU7BAx5NJnR41fB8hB2bFgQ0FCHSlnY+Whzw7Hk8eycyXxTR3NDOYTgx2zqrFkg7Ix1VjyPuTjIZLRVOWF3SbBHwzjYFx1KDeMk7JTHFjj205YhlzSWIdUVGIBcrDT2usXfVbUIAYlkrJTcHic+a/Gik+VWeUiFh/smBnMxys5ZnRQBmINykYoO8pg5+Lpw1MqzU67DcOqIs014weC9ges1+aCyB4KdogYvDkMAlWr7NSUuITnhv+NGvos2GOHUIecxkrl2TGvGovjMSF1poZJTRVQXovzpey4HTbTAkBlGsuQaiyPMthJnsLicN9OvElZTmPRzVUxQMEOEYMenp26DMqOzSZhSDQg0pLKsmKPHUId4rjK4yDQ+IupFUrPgcjFdFxdxKQsSeZulzJtZVYKC1D02VFMoNfzGBhfX44TxtbgshNGivEQqeC+nfiBoOTZKS7oqkHEkItnhzcUzKTsAJFU1sEuX7SLsrpBlNyzY4UqGkIbqQzK+RgXIbbJQhexY4ZVYltLD0pdjrTmfr1RKjtmpbAARQdlhWdHz4o8l8OGR6+arWpZubFgXBqLgp2iwhq3NoRl0KP0vDZNjx0OV380KTs+UnYKlZR9dvLo2dHTEJsrvCLLzFERQORz4YpXvFnZSGLSWHw2Vp5uYvhA0D1tydNYXiedb4oBCnaIGOSBjdrHRfA+IUPKMwc7oouyhmDHinOxCHWUpBgwm4+p5xwrNBXk8Goh/r0wEx7kmKrsKNNYQfN6LSUjVa8d8ggWFxSyEjGkauuvBpHGUjFfR5SfaxgZQZ6dwiXV1HOewnDmwaBspXTo1BFVuPeyY0WDQTOpKnGipduH6lLzlZ2BgFLZyU+wwz07HX0BdPYHRPAnPDvU6qIoIGWHiMGTZRorHGZoi6axMpWeA3Kw09KlQdnx0Z1WoZIqPSoudHlRdqx1+jvn6KE4or7c9NflF/dKk0ZFAMqp52H4gpFjwp0nZafU7RDnLGVFlgh2KI1VFFjr207knVR34Jno6A+IIZ3xk4WTUZ+DskPBTuGh9OwwJneqNdWgbLduGiuf8CDHrFERgFz27wuG867sAMpUllyR1U9prKKCgh0ihmwHgfKy80qvU5X/Ipsuyvujc27qVHiCCGuhrHziHg0gz00FKdgBAJw3pREja0pw6pF1pr2mXI0VUnh28vd5jKpJrMgSBmUKdooC0ueIGPgFwB8MIxRmqgduHlbZUJAj0ljdA2CMqSq33dkauesaXZu+bwZhPZSBRb8/BI/TjnCYIRhVA80Idmw2CU67hEAo8pr5VBKsxMXTh+Pi6cNNfU23xZSdkUkaC/IbPlJ2igP6thMxKO9itJiUD6scFcHhwc5AIIweXzDD0hG/zsGovydTkzDCethtkgho+EXErxgVYkawA8SqO5TGyh8xg0DzXI0FJE9jkUewuKBgh4hBaRTVkspqFcGOOmWnxOUQ82vUpLK4qlNd4jRtMjOhL14xHytyXCnTWWZUYwGxQRUFO/lDORvLEspOtNdOUoMyjYsoCijYIWKQJElUqWipyGrVUInF0eLb2Xk4chIaTapOwRLf1sCvCHbMuqtXBjvk2ckfbkV1nhEdlLXClZ0DXQOiOozSWMUFBTtEAtn02pF77GgIdsq4b0e9sjOG/DoFS3ylX0BxkTNrREKsskOnv3zBlR1lCtusVGYyaktdKHXZwRiwpy1SCEEG5eKCvu1EAvHpBjVoNSgDQF2FemXn80PRYIeUnYIlvoeTmZVYHPLsWAMe7HQPBBWP5e/zkCQJI6M3Urujvp1+SmMVFRTsEAlUR/vkHNbQA6dVo0EZUMzHUvE6ohKLgp2CxetMblA2NdhRXFApjZU/eGDTPRAQj5nl20rFyBovgEj5OWOMxtMUGRTsEAkMq4p86fe296v+GzEXS4uyo6GL8s7DpOwUOvwOOd6zY+ZFThlYUel5/uD7Ptp5AG6HeanMVIyKKju7WvvgC4bBe19SGqs4oG87kcDw6ohZb1+H+mCHKzu1GpQdtV2UO/sDwgBNyk7h4klRjWWmsuOmNJYliN/3+fTrcPiMrN1tfTEp/BI6ToqC/B9hhOUYVh1RdvapVHb6/SH0Rk8O2Sg7mTw7XNWpK3eLcnWi8PDGTT4P5KEKh6qxrEH8LLR8+nU4vCIrEuxEUlguhw2OPFaJEfpBnyKRgEhjqVR2uLfH5bBpCkbkYGcg7XJUiVUcJHh2hLJj3oWOBzt2mwQnXcTyRmKwk//PYhTvtaNQdsivUzzk/wgjLMdwjcqO6LFT6tKUd68v94i/Dyq66caz4zA3J5eoXjdhPVL12clHNZbHAhfXwUy8kmOF
YKepygOHTYI/GBbVn5TCKh7yf4QRloMrO4d7fKp67RyOpqGGaBzQWVPqgk0CGAPaogFTMuRgh5SdQsYTl8aSm8mZb1Am02l+cdolKO+LrODZcdhtIoX/aXMXADpOion8H2GE5agqcQr5dr+KVFZrL28oqG2Mg90mCUNzusaC3LMzloKdgqbEGUlxJqaxzPfsWMEjMpiRJClGzbGCsgPIJuVPD3QDAErJI1g0WOMIIyyFJElC3VFTkSU3FNSm7ACKiqwUwQ5jjJSdIsHrih1Dko8xATzYoe7J+UcZcFpB2QFkk7JQdiiNVTRY4wgjLIeWiiytE8+VZKrIau8LoCvaZZUbCInChF848qrs2CmNZRWUao5lgp3oOWZXW2QWHxmUi4e8HmGrVq3CBRdcgKamJkiShGeeeUY8FwgE8KMf/QjHHHMMSktL0dTUhG9+85vYv39/zDra2tqwYMECVFRUoKqqCgsXLkRPT4/J76T40KLstGbRUJCTqYvyjsORz3JopYcuUAWOJ2WwY97nyi+wHkpj5R1lU0erpBVHRpUd3lCQRkUUD3kNdnp7ezF16lTcfffdCc/19fXhgw8+wI033ogPPvgATz31FLZs2YILL7wwZrkFCxZg06ZNeOWVV/D8889j1apVuOqqq8x6C0WLFmWHe3ZyUXZaupKXn++ITjunzsmFT3yfnfymsaxxcR3MxKSxLNIGgKexOHSDVTzkNWydN28e5s2bl/S5yspKvPLKKzGP/fGPf8Txxx+P3bt3Y+TIkfjkk0/w0ksv4b333sNxxx0HALjrrrtw7rnn4vbbb0dTU5Ph76FY0dJr53C39iGgnExdlHeSX6doSF16bmI1lp2CHaug9E1ZZXQHNyhzKI1VPFjjCFNJZ2cnJElCVVUVAGD16tWoqqoSgQ4AzJkzBzabDWvWrEm5Hp/Ph66urpgfIhYtvXbkaqxslJ1Ir51Unp0d1FCwaBDKTnywQwblQYkVlZ0Sl0Oozfx3ojiwxhGmgoGBAfzoRz/C1772NVRUVAAAmpubUV9fH7Ocw+FATU0NmpubU65r+fLlqKysFD8jRowwdNsLkWFVkTuc5q6BtA3/QmEmeuQMKc/Cs5PBoLzjECk7xYI3bjZWIA9Tz08ePwSjakswd3Kjaa9JJMdt0aGsoxTqDik7xYN1jrA0BAIBfOUrXwFjDPfcc0/O61u2bBk6OzvFz549e3TYyuKivtwNp11CKMxwME0PnPY+v5hcXFOSfRorWZ8dxpg8KoKCnYKHKzs8jZWPQaCTmyqx8vozcMFUSnHnm5hqLLt1goqRtRTsFCOW1+h4oLNr1y689tprQtUBgMbGRrS0tMQsHwwG0dbWhsbG1Hdubrcbbrf2lMtgwmaTMLTSi91tfdjX3i88PPHwSqzqEmdWA/O4stPnD6HXF4xp4nWo24c+fwg2KTGXThQeovQ8waBMF5TBiDKNZS1lR76xIoNy8WCdIywJPNDZunUr/vvf/6K2tjbm+dmzZ6OjowPr1q0Tj7322msIh8OYNWuW2ZtbdMjl530pl8mlxw4Q6VDK757iU1m8meCwaq9l+nAQ2aPss8MYE54dp4kGZcI6KAMcq3h2gNiKLFJ2ioe8Kjs9PT3Ytm2b+H3Hjh3YsGEDampqMHToUHzpS1/CBx98gOeffx6hUEj4cGpqauByuTBx4kScc845+Pa3v417770XgUAAixcvxqWXXkqVWDqgpvycBzvZVGJx6svd2Nnah5ZuX4w3R3ROJnNyUcBnY4VZRNXJh0GZsA5W9eyMUKjIXqflkx+ESvJ6hL3//vuYPn06pk+fDgBYunQppk+fjptuugn79u3Ds88+i71792LatGkYOnSo+HnnnXfEOh5++GFMmDABZ511Fs4991ycfPLJuO+++/L1looKNY0FW3MYFcFJZVLmlVg0E6s4ULbeH/CHhUHZKnORCHOxYjUWEKvslLpJ2SkW8hq2nn766WC8VWUS0j3HqampwSOPPKLnZhFRuLKzV4WyU6dLsBPbWJB67BQXTrsNTruEQIihLxDMy7gIwjpYcRAoEBloXOqyo9cfojRWEWGdI4ywHMOrMqexhLKjceK5knrea6cnuWeHgp3iwaMwKfvzUHpOWAcrjosAIoOQv3b8SEwaWoGJQysy/wFREFBCkkiJ8Ox09IMxBklKNJKKhoI6KDstXXKwEw4z7GqNjoogz07R4HXa0T0QRH8gJJeeUzXWoMRjwannnP93/qR8bwKhM9Y6wghLMbTSC0mK9EM5HFVw4jmUwxBQTrJhoAe6BuALhuGwSaKbM1H4KHvtiGosO1VjDUZilR26FBHGQkcYkRKXwyaa/qUyKbf26KfsKA3K3K8zsqYkq/49hDWRe+2EybMzyHFbWNkhig86woi0DMvg2+GeHT0Mysouyp+TX6co8Sh67eRjXARhHWINypTKJIyFzjJEWoZXR8owkzUW7PUFxVDHXPvsABGVKBSdPbGTeuwUJby6pc8fFAZlSmEMTmKaCtIxQBgMHWFEWtI1FuSqjsdpy6lEs6bUBUmKNJvjQ0V5sDOmjoKdYoKnsZSeHTIoD05ixkVQsEMYDB1hRFrSNRY83CuPikhWqaUWh92G2lKeyor02uENBakSq7jgXZT7/SEaFzHIiRkESsEOYTB0hBFpSddY8HB37uZkjtKkHAyFsTtadj56CA0ALSbk+Vg0LmKwQ8oOYSZ0hBFpGZ5G2WmNppyG5NBQkKMMdvZ19CMYZnA5bGiqpLLzYkI5DJSaCg5uyLNDmAkdYURauLLTPRBE10Ag5jmu7GQ78VwJNykf6vEpBoCWwGajFEcx4RVprCAFO4McSmMRZkJHGJGWEpcD1SVOAIkmZa7s5FKJxVF2UaZp58ULV3Z6fEHw0XduMigPSjxOZRqLjgHCWCjYITKSqiLrsA4NBTnKLsqiEot67BQdXNnp6JNVQrqrH5xYdRAoUZzQEUZkJFVFFg92chkVwamvkD07O/hMLAp2ig6u7HT2y8EOjYsYnMR0UCaTOmEwNAiUyMiwKt5YMC6NJeZi6ajsdMuNBal7cvHBgx3u/7JJoHEgg5T6cjdOOqIWtaVu8uYRhkPBDpGRVGksUY2lY+l5c+cAfMFIV2ZSdooP3meHKzuUwhq82GwSHv6fE/K9GcQggYIdIiM8jbVXoewEQ2G09+lvUObjJ0pcdlGhRRQPIo0V9exQ+oIgCDOgMw2RkeFJlJ22Pj8Yi6QhqktyD3bK3A5xIQQilVi5dGUmrAkfK9LtCwIAXFSFQxCECVCwQ2SEKzuHe3wYiCov3K9TU+qCXYd8uyRJQt0BKIVVrPByY1527iJzMkEQJkDBDpGRqhKnuCPfH01libLzUv1STcpgh8ZEFCdK9Q4gzw5BEOZAZxoiI5Ikyb6daCqLKzt6+HU4So8ONRQsTnifHQ4FOwRBmAGdaQhViIqsOGVHj0osjlLZGVtHwU4xQsoOQRD5gM40hCpEY8F2Huzor+zUlZGyU+wkBDtUjUUQhAnQmYZQRbyy02qAssO7KJd7HKjRYZI6YT0ojUUQRD6gMw2
hikRlR79REZxRUTVnYmMFlZ0XKU67FFO95yRlhyAIE6CmgoQqhscrO3ziuY7VWLPG1ODOr03HlGGVuq2TsBaSJMHrtKMn2meHBkASBGEGFOwQquDzsZq7BhAMhQ2pxpIkCRdObdJtfYQ18SiCHUpjEQRhBnSmIVRRX+6G0y4hFGZo7hrAIQM8O8TgwOuSTztkUCYIwgzoTEOowmaTMLQyksra0twNfzAMgIIdQjvKiixSdgiCMAM60xCq4b6djXs7AQClLntCdQ1BZMLrkrPnZFAmCMIM6ExDqIZXZH24twMAUEuqDpEFXqcijUXKDkEQJkBnGkI1vNfOxj0dAPQ1JxODB0pjEQRhNnSmIVTDlZ32vgAA8usQ2aFMfbopjUUQhAnQmYZQDVd2OHo2FCQGDx5SdgiCMBk60xCqGR7ttcPRs6EgMXigNBZBEGZDZxpCNY2VHiinOJCyQ2RDiSKNRdVYBEGYAZ1pCNW4HDY0lHvE71SNRWQDKTsEQZgNnWkITSh9O1SNRWSDR6HsUAdlgiDMgM40hCZ4RRYA1JGyQ2QBKTsEQZgNnWkITcQqOxTsENpRBjs09ZwgCDOgMw2hCa7s2G0SqrzOPG8NUYh4yaBMEITJ0JmG0ARXdmpKXbDZpAxLE0QilMYiCMJsHJkXIQiZ40ZVY0JjOU47qi7fm0IUKF4yKBMEYTIU7BCaKPc48dJ1p+Z7M4gChpQdgiDMhs40BEGYCo2LIAjCbOhMQxCEqcQMAqVghyAIE6AzDUEQpqJMY1E1FkEQZkBnGoIgTEU5G4vSWARBmAGdaQiCMJUYzw4pOwRBmABVYxEEYSoepx3nHtOIHl8INaU0X40gCOOhYIcgCNP504IZ+d4EgiAGEaQhEwRBEARR1FCwQxAEQRBEUUPBDkEQBEEQRQ0FOwRBEARBFDUU7BAEQRAEUdRQsEMQBEEQRFFDwQ5BEARBEEUNBTsEQRAEQRQ1eQ12Vq1ahQsuuABNTU2QJAnPPPNMzPOMMdx0000YOnQovF4v5syZg61bt8Ys09bWhgULFqCiogJVVVVYuHAhenp6THwXBEEQBEFYmbwGO729vZg6dSruvvvupM/fdtttuPPOO3HvvfdizZo1KC0txdy5czEwMCCWWbBgATZt2oRXXnkFzz//PFatWoWrrrrKrLdAEARBEITFkRhjLN8bAQCSJOHpp5/GRRddBCCi6jQ1NeH73/8+fvCDHwAAOjs70dDQgBUrVuDSSy/FJ598gkmTJuG9997DcccdBwB46aWXcO6552Lv3r1oampS9dpdXV2orKxEZ2cnKioqDHl/BEEQBEHoi9rrt2U9Ozt27EBzczPmzJkjHqusrMSsWbOwevVqAMDq1atRVVUlAh0AmDNnDmw2G9asWZNy3T6fD11dXTE/BEEQBEEUJ5YNdpqbmwEADQ0NMY83NDSI55qbm1FfXx/zvMPhQE1NjVgmGcuXL0dlZaX4GTFihM5bTxAEQRCEVbBssGMky5YtQ2dnp/jZs2dPvjeJIAiCIAiDcOR7A1LR2NgIADh48CCGDh0qHj948CCmTZsmlmlpaYn5u2AwiLa2NvH3yXC73XC73eJ3bluidBZBEARBFA78up3JfmzZYGfMmDFobGzEq6++KoKbrq4urFmzBtdccw0AYPbs2ejo6MC6deswY8YMAMBrr72GcDiMWbNmqX6t7u5uAKB0FkEQBEEUIN3d3aisrEz5fF6DnZ6eHmzbtk38vmPHDmzYsAE1NTUYOXIkrrvuOvzyl7/E+PHjMWbMGNx4441oamoSFVsTJ07EOeecg29/+9u49957EQgEsHjxYlx66aWqK7EAoKmpCXv27EF5eTkkSdLt/XV1dWHEiBHYs2cPVXmphPaZNmh/aYf2mTZof2mD9pd2ctlnjDF0d3dnvObnNdh5//33ccYZZ4jfly5dCgC4/PLLsWLFCvzwhz9Eb28vrrrqKnR0dODkk0/GSy+9BI/HI/7m4YcfxuLFi3HWWWfBZrNh/vz5uPPOOzVth81mw/Dhw/V5U0moqKigg14jtM+0QftLO7TPtEH7Sxu0v7ST7T5Lp+hwLNNnpxih/j3aoX2mDdpf2qF9pg3aX9qg/aUdM/bZoKzGIgiCIAhi8EDBjoG43W7cfPPNMZVfRHpon2mD9pd2aJ9pg/aXNmh/aceMfUZpLIIgCIIgihpSdgiCIAiCKGoo2CEIgiAIoqihYIcgCIIgiKKGgh2CIAiCIIoaCnYM5O6778bo0aPh8Xgwa9YsrF27Nt+bZAlWrVqFCy64AE1NTZAkCc8880zM84wx3HTTTRg6dCi8Xi/mzJmDrVu35mdjLcDy5csxc+ZMlJeXo76+HhdddBG2bNkSs8zAwAAWLVqE2tpalJWVYf78+Th48GCetjj/3HPPPZgyZYpoUjZ79my8+OKL4nnaX+m59dZbIUkSrrvuOvEY7bNYfvrTn0KSpJifCRMmiOdpfyWyb98+XHbZZaitrYXX68UxxxyD999/Xzxv5Lmfgh2DeOyxx7B06VLcfPPN+OCDDzB16lTMnTs3YXDpYKS3txdTp07F3XffnfT52267DXfeeSfuvfderFmzBqWlpZg7dy4GBgZM3lJrsHLlSixatAjvvvsuXnnlFQQCAZx99tno7e0VyyxZsgTPPfccnnjiCaxcuRL79+/HJZdcksetzi/Dhw/HrbfeinXr1uH999/HmWeeiS9+8YvYtGkTANpf6Xjvvffw5z//GVOmTIl5nPZZIpMnT8aBAwfEz1tvvSWeo/0VS3t7O0466SQ4nU68+OKL2Lx5M37729+iurpaLGPouZ8RhnD88cezRYsWid9DoRBrampiy5cvz+NWWQ8A7Omnnxa/h8Nh1tjYyH7zm9+Ixzo6Opjb7Wb/93//l4cttB4tLS0MAFu5ciVjLLJ/nE4ne+KJJ8Qyn3zyCQPAVq9ena/NtBzV1dXs/vvvp/2Vhu7ubjZ+/Hj2yiuvsNNOO41973vfY4zRMZaMm2++mU2dOjXpc7S/EvnRj37ETj755JTPG33uJ2XHAPx+P9atW4c5c+aIx2w2G+bMmYPVq1fnccusz44dO9Dc3Byz7yorKzFr1izad1E6OzsBADU1NQCAdevWIRAIxOyzCRMmYOTIkbTPAIRCITz66KPo7e3F7NmzaX+lYdGiRTjvvPNi9g1Ax1gqtm7diqamJowdOxYLFizA7t27AdD+Ssazzz6L4447Dl/+8pdRX1+P6dOn4y9/+Yt43uhzPwU7BnD48GGEQiE0NDTEPN7Q0IDm5uY8bVVhwPcP7bvkhMNhXHfddTjppJNw9NFHA4jsM5fLhaqqqphlB/s+++ijj1BWVga3242rr74aTz/9NCZNmkT7KwWPPvooPvjgAyxfvjzhOdpnicyaNQsrVqzASy+9hHvuuQc7duzAKaecgu7ubtpfSfj8889xzz33YPz48Xj55ZdxzTXX4Lvf/S7+9re/ATD+3J/XqecEQWhj0aJF+Pjjj2O8AURyjjrqKGzYsAGdnZ148skncfnll2PlypX53ixLsmfPHnzve9/DK6
+8Ao/Hk+/NKQjmzZsn/j9lyhTMmjULo0aNwuOPPw6v15vHLbMm4XAYxx13HH71q18BAKZPn46PP/4Y9957Ly6//HLDX5+UHQMYMmQI7HZ7gvP+4MGDaGxszNNWFQZ8/9C+S2Tx4sV4/vnn8frrr2P48OHi8cbGRvj9fnR0dMQsP9j3mcvlwhFHHIEZM2Zg+fLlmDp1Ku644w7aX0lYt24dWlpacOyxx8LhcMDhcGDlypW488474XA40NDQQPssA1VVVTjyyCOxbds2OsaSMHToUEyaNCnmsYkTJ4rUn9Hnfgp2DMDlcmHGjBl49dVXxWPhcBivvvoqZs+encctsz5jxoxBY2NjzL7r6urCmjVrBu2+Y4xh8eLFePrpp/Haa69hzJgxMc/PmDEDTqczZp9t2bIFu3fvHrT7LBnhcBg+n4/2VxLOOussfPTRR9iwYYP4Oe6447BgwQLxf9pn6enp6cH27dsxdOhQOsaScNJJJyW0zPjss88watQoACac+3O2OBNJefTRR5nb7WYrVqxgmzdvZldddRWrqqpizc3N+d60vNPd3c3Wr1/P1q9fzwCw3/3ud2z9+vVs165djDHGbr31VlZVVcX+9a9/sQ8//JB98YtfZGPGjGH9/f153vL8cM0117DKykr2xhtvsAMHDoifvr4+sczVV1/NRo4cyV577TX2/vvvs9mzZ7PZs2fncavzyw033MBWrlzJduzYwT788EN2ww03MEmS2H/+8x/GGO0vNSirsRijfRbP97//ffbGG2+wHTt2sLfffpvNmTOHDRkyhLW0tDDGaH/Fs3btWuZwONgtt9zCtm7dyh5++GFWUlLCHnroIbGMked+CnYM5K677mIjR45kLpeLHX/88ezdd9/N9yZZgtdff50BSPi5/PLLGWOREsQbb7yRNTQ0MLfbzc466yy2ZcuW/G50Hkm2rwCwBx98UCzT39/PvvOd77Dq6mpWUlLCLr74YnbgwIH8bXSeufLKK9moUaOYy+VidXV17KyzzhKBDmO0v9QQH+zQPovlq1/9Khs6dChzuVxs2LBh7Ktf/Srbtm2beJ72VyLPPfccO/roo5nb7WYTJkxg9913X8zzRp77JcYYy10fIgiCIAiCsCbk2SEIgiAIoqihYIcgCIIgiKKGgh2CIAiCIIoaCnYIgiAIgihqKNghCIIgCKKooWCHIAiCIIiihoIdgiAIgiCKGgp2CIIoWHbu3AlJkrBhwwbDXuOKK67ARRddZNj6CYIwHgp2CILIG1dccQUkSUr4Oeecc1T9/YgRI3DgwAEcffTRBm8pQRCFjCPfG0AQxODmnHPOwYMPPhjzmNvtVvW3drt90E6RJghCPaTsEASRV9xuNxobG2N+qqurAQCSJOGee+7BvHnz4PV6MXbsWDz55JPib+PTWO3t7ViwYAHq6urg9Xoxfvz4mEDqo48+wplnngmv14va2lpcddVV6OnpEc+HQiEsXboUVVVVqK2txQ9/+EPET9QJh8NYvnw5xowZA6/Xi6lTp8ZsE0EQ1oOCHYIgLM2NN96I+fPnY+PGjViwYAEuvfRSfPLJJymX3bx5M1588UV88sknuOeeezBkyBAAQG9vL+bOnYvq6mq89957eOKJJ/Df//4XixcvFn//29/+FitWrMADDzyAt956C21tbXj66adjXmP58uX4+9//jnvvvRebNm3CkiVLcNlll2HlypXG7QSCIHJDl3GiBEEQWXD55Zczu93OSktLY35uueUWxlhk4vvVV18d8zezZs1i11xzDWOMsR07djAAbP369Ywxxi644AL2rW99K+lr3Xfffay6upr19PSIx1544QVms9lYc3MzY4yxoUOHsttuu008HwgE2PDhw9kXv/hFxhhjAwMDrKSkhL3zzjsx6164cCH72te+lv2OIAjCUMizQxBEXjnjjDNwzz33xDxWU1Mj/j979uyY52bPnp2y+uqaa67B/Pnz8cEHH+Dss8/GRRddhBNPPBEA8Mknn2Dq1KkoLS0Vy5900kkIh8PYsmULPB4PDhw4gFmzZonnHQ4HjjvuOJHK2rZtG/r6+vCFL3wh5nX9fj+mT5+u/c0TBGEKFOwQBJFXSktLccQRR+iyrnnz5mHXrl3497//jVdeeQVnnXUWFi1ahNtvv12X9XN/zwsvvIBhw4bFPKfWVE0QhPmQZ4cgCEvz7rvvJvw+ceLElMvX1dXh8ssvx0MPPYQ//OEPuO+++wAAEydOxMaNG9Hb2yuWffvtt2Gz2XDUUUehsrISQ4cOxZo1a8TzwWAQ69atE79PmjQJbrcbu3fvxhFHHBHzM2LECL3eMkEQOkPKDkEQecXn86G5uTnmMYfDIYzFTzzxBI477jicfPLJePjhh7F27Vr89a9/Tbqum266CTNmzMDkyZPh8/nw/PPPi8Bowf9v745RFAYCMAr/sRAMKDYiOYBELAU7PYRYigxYChJEG21ULLTKNUxnqwfwDlZilyKNuYBuF9hlO11WhveVQzLFVI/JMOn3tVwuZYzRarVSkiQaj8caDAaqVquSpCAItNvtVKvVVK/XFYah7vd7Nn+xWNRsNtNkMtHj8VC73VaapjqfzyqVSjLG/MEKAXgVsQPgXx2PR3me923M931dLhdJ0nq9VhRFGo1G8jxP+/1ejUbj17ny+bzm87lut5sKhYI6nY6iKJIkua6r0+mkIAjUarXkuq56vZ7CMMzen06niuNYxhjlcjkNh0N1u12laZo9s9lsVKlUtN1udb1eVS6X1Ww2tVgs3r00AN7EeT5/XCIBAB/CcRwdDgd+1wDgJZzZAQAAViN2AACA1TizA+Bj8ZUdwDuwswMAAKxG7AAAAKsROwAAwGrEDgAAsBqxAwAArEbsAAAAqxE7AADAasQOAACwGrEDAACs9gXnTrZXIlmG8AAAAABJRU5ErkJggg==", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# Learning Curve RL\n", "def plot_learning_curve(env, model):\n", "\n", " plt.plot(rl_scores)\n", " plt.xlabel('Episode')\n", " plt.ylabel('Total Score')\n", " plt.title('Learning Curve Reinforcement Learning')\n", " plt.show()\n", "\n", "plot_learning_curve(env, model)\n" ] }, { "cell_type": "code", "execution_count": 925, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "PzWIpRfXJour", "outputId": "1d22b23d-b3f3-471e-be23-886632e61bb4" }, "outputs": [], "source": [ "# Generate data for KNN\n", "states = []\n", "actions = []\n", "for _ in range(3600): # generate 3600 samples\n", " state = env.reset()\n", " state = tuple(state)\n", "\n", " action = env.action_space.sample()\n", " states.append(state)\n", " actions.append(action)" ] }, { "cell_type": "code", "execution_count": 1045, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Successfully generate! saved to 'generated_data_knn.xlsx'\n" ] } ], "source": [ "# For save generate data KNN\n", "# Convert data to DataFrame\n", "data = pd.DataFrame(states, columns=['Gsize', 'Bratio', 'Btime', 'Temperature'])\n", "data['Action'] = actions\n", "\n", "# Save to Excel\n", "data.to_excel('generated_data_knn.xlsx', index=False)\n", "\n", "print(\"Successfully generate! saved to 'generated_data_knn.xlsx'\")" ] }, { "cell_type": "code", "execution_count": 991, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "k5aw7n6_Alqy", "outputId": "9d77bdc0-068a-4cfe-effc-7e1f535302d4" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Episode 1, Iteration 60, State: [2, 15.0, 124.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 3\n", "Episode 1, Iteration 60, State: [1, 16.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 1, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 1, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 12\n", "Episode 1, Iteration 60, State: [6, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 16\n", "Episode 1, Iteration 60, State: [5, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 19\n", "Episode 1, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 21\n", "Episode 1, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 23\n", "Episode 1, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 25\n", "Episode 1, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 27\n", "Episode 1, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 29\n", "Episode 1, 
Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 31\n", "Episode 1, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 33\n", "Episode 1, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 35\n", "Episode 1, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 37\n", "Episode 1, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 39\n", "Episode 1, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 42\n", "Episode 1, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 44\n", "Episode 1, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 46\n", "Episode 1, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 48\n", "Episode 1, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 50\n", "Episode 1, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 52\n", "Episode 1, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 54\n", "Episode 1, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 56\n", "Episode 1, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 59\n", "Episode 1, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 1, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 63\n", "Episode 1, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 65\n", "Episode 1, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 67\n", "Episode 1, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 69\n", "Episode 1, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 71\n", "Episode 1, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , 
Cumulative Score: 73\n", "Episode 1, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 75\n", "Episode 1, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 77\n", "Episode 1, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 80\n", "Episode 1, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 83\n", "Episode 1, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 87\n", "Episode 1, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 90\n", "Episode 1, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 94\n", "Episode 1, Iteration 60, State: [5, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 97\n", "Episode 1, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 100\n", "Episode 1, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102\n", "Episode 1, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 105\n", "Episode 1, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 107\n", "Episode 1, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 109\n", "Episode 1, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 111\n", "Episode 1, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 113\n", "Episode 1, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 115\n", "Episode 1, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 117\n", "Episode 1, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 119\n", "Episode 1, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 121\n", "Episode 1, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 123\n", "Episode 1, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, 
Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 125\n", "Episode 1, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 127\n", "Episode 1, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 129\n", "Episode 1, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 131\n", "Episode 1, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 133\n", "Episode 1, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 135\n", "Episode 1, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 137\n", "Episode 1, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 139\n", "Episode: 1 Best Action: 0 Best evaluation action: 0\n", "Episode: 1 Score: 139 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 2, Iteration 60, State: [6, 16.0, 117.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 3\n", "Episode 2, Iteration 60, State: [1, 15.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 6\n", "Episode 2, Iteration 60, State: [3, 16.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 9\n", "Episode 2, Iteration 60, State: [3, 15.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 12\n", "Episode 2, Iteration 60, State: [5, 16.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 15\n", "Episode 2, Iteration 60, State: [7, 16.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 18\n", "Episode 2, Iteration 60, State: [4, 16.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 21\n", "Episode 2, Iteration 60, State: [4, 15.0, 120.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 24\n", "Episode 2, Iteration 60, State: [2, 15.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 27\n", "Episode 2, Iteration 60, State: [7, 16.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 30\n", "Episode 2, Iteration 60, State: [7, 16.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 33\n", "Episode 2, Iteration 60, State: [1, 16.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 36\n", "Episode 2, 
Iteration 60, State: [7, 15.0, 120.0, 87.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 40\n", "Episode 2, Iteration 60, State: [1, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 42\n", "Episode 2, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 46\n", "Episode 2, Iteration 60, State: [3, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 48\n", "Episode 2, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 50\n", "Episode 2, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 54\n", "Episode 2, Iteration 60, State: [5, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 56\n", "Episode 2, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 58\n", "Episode 2, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 60\n", "Episode 2, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 62\n", "Episode 2, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 64\n", "Episode 2, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 66\n", "Episode 2, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 68\n", "Episode 2, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 72\n", "Episode 2, Iteration 60, State: [4, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 74\n", "Episode 2, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 76\n", "Episode 2, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 78\n", "Episode 2, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 82\n", "Episode 2, Iteration 60, State: [4, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 84\n", "Episode 2, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 86\n", "Episode 2, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , 
Cumulative Score: 88\n", "Episode 2, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 90\n", "Episode 2, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 92\n", "Episode 2, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 94\n", "Episode 2, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 96\n", "Episode 2, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 98\n", "Episode 2, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 102\n", "Episode 2, Iteration 60, State: [4, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 104\n", "Episode 2, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 106\n", "Episode 2, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 108\n", "Episode 2, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 110\n", "Episode 2, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 112\n", "Episode 2, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 114\n", "Episode 2, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 116\n", "Episode 2, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 118\n", "Episode 2, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 120\n", "Episode 2, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 122\n", "Episode 2, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 124\n", "Episode 2, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 126\n", "Episode 2, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 128\n", "Episode 2, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 130\n", "Episode 2, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, 
Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 132\n", "Episode 2, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 134\n", "Episode 2, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 136\n", "Episode 2, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 138\n", "Episode 2, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 140\n", "Episode 2, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 142\n", "Episode 2, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 144\n", "Episode: 2 Best Action: 0 Best evaluation action: 0\n", "Episode: 2 Score: 144 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0\n", "Episode 3, Iteration 60, State: [3, 15.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 3, Iteration 60, State: [1, 16.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 6\n", "Episode 3, Iteration 60, State: [4, 15.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 9\n", "Episode 3, Iteration 60, State: [7, 15.0, 120.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 13\n", "Episode 3, Iteration 60, State: [1, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 16\n", "Episode 3, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 19\n", "Episode 3, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 21\n", "Episode 3, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 24\n", "Episode 3, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 26\n", "Episode 3, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 29\n", "Episode 3, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 32\n", "Episode 3, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 35\n", "Episode 3, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 37\n", "Episode 3, 
Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 40\n", "Episode 3, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 43\n", "Episode 3, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 45\n", "Episode 3, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 48\n", "Episode 3, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 52\n", "Episode 3, Iteration 60, State: [4, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 55\n", "Episode 3, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 57\n", "Episode 3, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 60\n", "Episode 3, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 63\n", "Episode 3, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 66\n", "Episode 3, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 70\n", "Episode 3, Iteration 60, State: [5, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 73\n", "Episode 3, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 75\n", "Episode 3, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 78\n", "Episode 3, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 81\n", "Episode 3, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 83\n", "Episode 3, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 86\n", "Episode 3, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 88\n", "Episode 3, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 91\n", "Episode 3, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 93\n", "Episode 3, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , 
Cumulative Score: 96\n", "Episode 3, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 99\n", "Episode 3, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 102\n", "Episode 3, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 104\n", "Episode 3, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 107\n", "Episode 3, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 109\n", "Episode 3, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 112\n", "Episode 3, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 115\n", "Episode 3, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 119\n", "Episode 3, Iteration 60, State: [1, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 122\n", "Episode 3, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 125\n", "Episode 3, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 127\n", "Episode 3, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 130\n", "Episode 3, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 132\n", "Episode 3, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 135\n", "Episode 3, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 137\n", "Episode 3, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 140\n", "Episode 3, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 143\n", "Episode 3, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 147\n", "Episode 3, Iteration 60, State: [2, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 150\n", "Episode 3, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 152\n", "Episode 3, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation 
Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 155\n", "Episode 3, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 157\n", "Episode 3, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 160\n", "Episode 3, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 164\n", "Episode 3, Iteration 60, State: [7, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 167\n", "Episode 3, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 169\n", "Episode: 3 Best Action: 0 Best evaluation action: 0\n", "Episode: 3 Score: 169 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0\n", "Episode 4, Iteration 60, State: [5, 15.0, 115.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 4\n", "Episode 4, Iteration 60, State: [5, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 7\n", "Episode 4, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 10\n", "Episode 4, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 13\n", "Episode 4, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 16\n", "Episode 4, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 19\n", "Episode 4, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 22\n", "Episode 4, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 25\n", "Episode 4, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 26\n", "Episode 4, Iteration 60, State: [7, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 27\n", "Episode 4, Iteration 60, State: [7, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 28\n", "Episode 4, Iteration 60, State: [2, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 30\n", "Episode 4, Iteration 60, State: [2, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 19.0, 145.0, 93.0), Reward: 2, , Cumulative Score: 32\n", "Episode 4, Iteration 60, State: [2, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 34\n", 
"Episode 4, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 36\n", "Episode 4, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 38\n", "Episode 4, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 40\n", "Episode 4, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 43\n", "Episode 4, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 46\n", "Episode 4, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 49\n", "Episode 4, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 52\n", "Episode 4, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 55\n", "Episode 4, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 57\n", "Episode 4, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 59\n", "Episode 4, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 62\n", "Episode 4, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 64\n", "Episode 4, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 66\n", "Episode 4, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 69\n", "Episode 4, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 72\n", "Episode 4, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 75\n", "Episode 4, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 76\n", "Episode 4, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 79\n", "Episode 4, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 80\n", "Episode 4, Iteration 60, State: [3, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 81\n", "Episode 4, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), 
Reward: 1, , Cumulative Score: 82\n", "Episode 4, Iteration 60, State: [6, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 83\n", "Episode 4, Iteration 60, State: [5, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 84\n", "Episode 4, Iteration 60, State: [6, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 85\n", "Episode 4, Iteration 60, State: [6, 22.0, 160.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 165.0, 97.0), Reward: 1, , Cumulative Score: 86\n", "Episode 4, Iteration 60, State: [5, 23.0, 165.0, 97.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 24.0, 170.0, 98.0), Reward: 1, , Cumulative Score: 87\n", "Episode 4, Iteration 60, State: [7, 24.0, 170.0, 98.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 25.0, 175.0, 99.0), Reward: 1, , Cumulative Score: 88\n", "Episode 4, Iteration 60, State: [4, 25.0, 175.0, 99.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 24.0, 170.0, 98.0), Reward: 2, , Cumulative Score: 90\n", "Episode 4, Iteration 60, State: [3, 24.0, 170.0, 98.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 23.0, 165.0, 97.0), Reward: 2, , Cumulative Score: 92\n", "Episode 4, Iteration 60, State: [2, 23.0, 165.0, 97.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 22.0, 160.0, 96.0), Reward: 2, , Cumulative Score: 94\n", "Episode 4, Iteration 60, State: [2, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 96\n", "Episode 4, Iteration 60, State: [5, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 22.0, 160.0, 96.0), Reward: 2, , Cumulative Score: 98\n", "Episode 4, Iteration 60, State: [4, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 100\n", "Episode 4, Iteration 60, State: [2, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 102\n", "Episode 4, Iteration 60, State: [1, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 19.0, 145.0, 93.0), Reward: 2, , Cumulative Score: 104\n", "Episode 4, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 106\n", "Episode 4, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 108\n", "Episode 4, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 109\n", "Episode 4, Iteration 60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 111\n", "Episode 4, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 113\n", "Episode 4, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 114\n", "Episode 4, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation 
Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 117\n", "Episode 4, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 118\n", "Episode 4, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 120\n", "Episode 4, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 121\n", "Episode 4, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 124\n", "Episode: 4 Best Action: 0 Best evaluation action: 2\n", "Episode: 4 Score: 124 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 5, Iteration 60, State: [4, 15.0, 122.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 5, Iteration 60, State: [7, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 7\n", "Episode 5, Iteration 60, State: [2, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 10\n", "Episode 5, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 12\n", "Episode 5, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 14\n", "Episode 5, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 16\n", "Episode 5, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 18\n", "Episode 5, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 20\n", "Episode 5, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 23\n", "Episode 5, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 25\n", "Episode 5, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 27\n", "Episode 5, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 29\n", "Episode 5, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 31\n", "Episode 5, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 33\n", "Episode 5, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 35\n", 
"Episode 5, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 38\n", "Episode 5, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 41\n", "Episode 5, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 43\n", "Episode 5, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 46\n", "Episode 5, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 50\n", "Episode 5, Iteration 60, State: [7, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 53\n", "Episode 5, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 55\n", "Episode 5, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 57\n", "Episode 5, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 59\n", "Episode 5, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 5, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 64\n", "Episode 5, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 67\n", "Episode 5, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 71\n", "Episode 5, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 74\n", "Episode 5, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 76\n", "Episode 5, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 79\n", "Episode 5, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 82\n", "Episode 5, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 84\n", "Episode 5, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 86\n", "Episode 5, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 88\n", "Episode 5, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), 
Reward: 2, , Cumulative Score: 90\n", "Episode 5, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 92\n", "Episode 5, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 94\n", "Episode 5, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 96\n", "Episode 5, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 98\n", "Episode 5, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 100\n", "Episode 5, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102\n", "Episode 5, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 104\n", "Episode 5, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 106\n", "Episode 5, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 108\n", "Episode 5, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 110\n", "Episode 5, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 112\n", "Episode 5, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 114\n", "Episode 5, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 116\n", "Episode 5, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 118\n", "Episode 5, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 120\n", "Episode 5, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 123\n", "Episode 5, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 126\n", "Episode 5, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 129\n", "Episode 5, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 131\n", "Episode 5, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 134\n", "Episode 5, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, 
Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 137\n", "Episode 5, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 140\n", "Episode 5, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 143\n", "Episode 5, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 146\n", "Episode: 5 Best Action: 0 Best evaluation action: 0\n", "Episode: 5 Score: 146 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 6, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 12\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 6, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 18\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 21\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 24\n", "Episode 6, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 27\n", "Episode 6, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 30\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 33\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 36\n", "Episode 6, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 39\n", "Episode 6, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 42\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 45\n", "Episode 6, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 
48\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 51\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 54\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 57\n", "Episode 6, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 60\n", "Episode 6, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 63\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 66\n", "Episode 6, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 69\n", "Episode 6, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 72\n", "Episode 6, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 75\n", "Episode 6, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 78\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 81\n", "Episode 6, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 84\n", "Episode 6, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 87\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 90\n", "Episode 6, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 93\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 96\n", "Episode 6, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 99\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 102\n", "Episode 6, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 105\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 108\n", "Episode 6, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 
120.0, 90.0), Reward: 3, , Cumulative Score: 111\n", "Episode 6, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 114\n", "Episode 6, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 117\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 120\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 123\n", "Episode 6, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 126\n", "Episode 6, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 129\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 132\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 135\n", "Episode 6, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 138\n", "Episode 6, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 141\n", "Episode 6, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 144\n", "Episode 6, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 147\n", "Episode 6, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 150\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 153\n", "Episode 6, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 156\n", "Episode 6, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 159\n", "Episode 6, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 162\n", "Episode 6, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 165\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 168\n", "Episode 6, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 171\n", "Episode 6, Iteration 60, State: [7, 16.0, 120.0, 90.0], 
Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 174\n", "Episode 6, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 177\n", "Episode 6, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 180\n", "Episode: 6 Best Action: 1 Best evaluation action: 2\n", "Episode: 6 Score: 180 Best Reward: 3 Gsize State: 4.0 Bratio State: 1:16.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 90.0\n", "Episode 7, Iteration 60, State: [2, 14.0, 117.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 4\n", "Episode 7, Iteration 60, State: [4, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 7\n", "Episode 7, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 10\n", "Episode 7, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 13\n", "Episode 7, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 16\n", "Episode 7, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 18\n", "Episode 7, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 21\n", "Episode 7, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 23\n", "Episode 7, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 25\n", "Episode 7, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 27\n", "Episode 7, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 29\n", "Episode 7, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 31\n", "Episode 7, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 33\n", "Episode 7, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 35\n", "Episode 7, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 38\n", "Episode 7, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 41\n", "Episode 7, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , 
Cumulative Score: 44\n", "Episode 7, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 46\n", "Episode 7, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 49\n", "Episode 7, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 51\n", "Episode 7, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 53\n", "Episode 7, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 55\n", "Episode 7, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 57\n", "Episode 7, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 59\n", "Episode 7, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 7, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 64\n", "Episode 7, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 66\n", "Episode 7, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 68\n", "Episode 7, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 70\n", "Episode 7, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 72\n", "Episode 7, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 75\n", "Episode 7, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 77\n", "Episode 7, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 80\n", "Episode 7, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 82\n", "Episode 7, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 84\n", "Episode 7, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 86\n", "Episode 7, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 88\n", "Episode 7, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: 
(5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 90\n", "Episode 7, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 92\n", "Episode 7, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 94\n", "Episode 7, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 97\n", "Episode 7, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 100\n", "Episode 7, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102\n", "Episode 7, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 104\n", "Episode 7, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 106\n", "Episode 7, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 108\n", "Episode 7, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 110\n", "Episode 7, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 113\n", "Episode 7, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 115\n", "Episode 7, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 117\n", "Episode 7, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 120\n", "Episode 7, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 124\n", "Episode 7, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 127\n", "Episode 7, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 130\n", "Episode 7, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 133\n", "Episode 7, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 136\n", "Episode 7, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 139\n", "Episode 7, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 142\n", "Episode 7, Iteration 60, State: [4, 14.0, 125.0, 
89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 146\n", "Episode 7, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 149\n", "Episode: 7 Best Action: 0 Best evaluation action: 2\n", "Episode: 7 Score: 149 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 8, Iteration 60, State: [4, 16.0, 122.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 3\n", "Episode 8, Iteration 60, State: [1, 17.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6\n", "Episode 8, Iteration 60, State: [7, 16.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9\n", "Episode 8, Iteration 60, State: [3, 16.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 12\n", "Episode 8, Iteration 60, State: [3, 15.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 15\n", "Episode 8, Iteration 60, State: [1, 16.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 18\n", "Episode 8, Iteration 60, State: [4, 15.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 21\n", "Episode 8, Iteration 60, State: [4, 15.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 24\n", "Episode 8, Iteration 60, State: [5, 15.0, 120.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 28\n", "Episode 8, Iteration 60, State: [5, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 31\n", "Episode 8, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 33\n", "Episode 8, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 36\n", "Episode 8, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 39\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 42\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 45\n", "Episode 8, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 48\n", "Episode 8, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 52\n", "Episode 8, Iteration 60, State: [4, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , 
Cumulative Score: 55\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 58\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 61\n", "Episode 8, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 63\n", "Episode 8, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 66\n", "Episode 8, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 68\n", "Episode 8, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 71\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 74\n", "Episode 8, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 76\n", "Episode 8, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 79\n", "Episode 8, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 81\n", "Episode 8, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 84\n", "Episode 8, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 86\n", "Episode 8, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 89\n", "Episode 8, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 93\n", "Episode 8, Iteration 60, State: [5, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 96\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 99\n", "Episode 8, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 102\n", "Episode 8, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 105\n", "Episode 8, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 108\n", "Episode 8, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 112\n", "Episode 8, Iteration 60, State: [4, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next 
State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 115\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 118\n", "Episode 8, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 120\n", "Episode 8, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 123\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 126\n", "Episode 8, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 129\n", "Episode 8, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 131\n", "Episode 8, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 134\n", "Episode 8, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 138\n", "Episode 8, Iteration 60, State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 141\n", "Episode 8, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 143\n", "Episode 8, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 146\n", "Episode 8, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 148\n", "Episode 8, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 151\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 154\n", "Episode 8, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 156\n", "Episode 8, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 159\n", "Episode 8, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 163\n", "Episode 8, Iteration 60, State: [1, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 166\n", "Episode 8, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 169\n", "Episode 8, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 171\n", "Episode 8, Iteration 60, State: [2, 16.0, 
130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 174\n", "Episode: 8 Best Action: 0 Best evaluation action: 2\n", "Episode: 8 Score: 174 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0\n", "Episode 9, Iteration 60, State: [1, 16.0, 118.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 3\n", "Episode 9, Iteration 60, State: [5, 15.0, 120.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 7\n", "Episode 9, Iteration 60, State: [5, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 9\n", "Episode 9, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 11\n", "Episode 9, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 13\n", "Episode 9, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 15\n", "Episode 9, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 17\n", "Episode 9, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 21\n", "Episode 9, Iteration 60, State: [1, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 23\n", "Episode 9, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 25\n", "Episode 9, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 27\n", "Episode 9, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 29\n", "Episode 9, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 31\n", "Episode 9, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 35\n", "Episode 9, Iteration 60, State: [4, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 37\n", "Episode 9, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 39\n", "Episode 9, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 41\n", "Episode 9, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 43\n", "Episode 9, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 
2, , Cumulative Score: 45\n", "Episode 9, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 47\n", "Episode 9, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 49\n", "Episode 9, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 51\n", "Episode 9, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 53\n", "Episode 9, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 55\n", "Episode 9, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 59\n", "Episode 9, Iteration 60, State: [7, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 61\n", "Episode 9, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 63\n", "Episode 9, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 65\n", "Episode 9, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 67\n", "Episode 9, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 69\n", "Episode 9, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 71\n", "Episode 9, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 73\n", "Episode 9, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 75\n", "Episode 9, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 77\n", "Episode 9, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 79\n", "Episode 9, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 81\n", "Episode 9, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 83\n", "Episode 9, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 85\n", "Episode 9, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 87\n", "Episode 9, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next 
State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 89\n", "Episode 9, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 91\n", "Episode 9, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 93\n", "Episode 9, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 95\n", "Episode 9, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 97\n", "Episode 9, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 99\n", "Episode 9, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 101\n", "Episode 9, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 103\n", "Episode 9, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 105\n", "Episode 9, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 107\n", "Episode 9, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 109\n", "Episode 9, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 111\n", "Episode 9, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 113\n", "Episode 9, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 115\n", "Episode 9, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 117\n", "Episode 9, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 119\n", "Episode 9, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 121\n", "Episode 9, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 123\n", "Episode 9, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 125\n", "Episode 9, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 127\n", "Episode 9, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 129\n", "Episode: 9 Best Action: 0 Best evaluation 
action: 2\n", "Episode: 9 Score: 129 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0\n", "Episode 10, Iteration 60, State: [2, 16.0, 123.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 3\n", "Episode 10, Iteration 60, State: [6, 15.0, 120.0, 87.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 7\n", "Episode 10, Iteration 60, State: [4, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 9\n", "Episode 10, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 11\n", "Episode 10, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 13\n", "Episode 10, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 15\n", "Episode 10, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 17\n", "Episode 10, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 19\n", "Episode 10, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 21\n", "Episode 10, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 23\n", "Episode 10, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 27\n", "Episode 10, Iteration 60, State: [2, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 29\n", "Episode 10, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 31\n", "Episode 10, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 33\n", "Episode 10, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 35\n", "Episode 10, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 37\n", "Episode 10, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 39\n", "Episode 10, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 41\n", "Episode 10, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 43\n", "Episode 10, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 
125.0, 87.0), Reward: 2, , Cumulative Score: 45\n", "Episode 10, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 47\n", "Episode 10, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 49\n", "Episode 10, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 51\n", "Episode 10, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 53\n", "Episode 10, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 55\n", "Episode 10, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 57\n", "Episode 10, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 59\n", "Episode 10, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 61\n", "Episode 10, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 63\n", "Episode 10, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 65\n", "Episode 10, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 67\n", "Episode 10, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 69\n", "Episode 10, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 71\n", "Episode 10, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 73\n", "Episode 10, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 75\n", "Episode 10, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 77\n", "Episode 10, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 81\n", "Episode 10, Iteration 60, State: [3, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 83\n", "Episode 10, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 85\n", "Episode 10, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 89\n", "Episode 10, Iteration 60, State: [2, 14.0, 120.0, 86.0], 
Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 91\n", "Episode 10, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 93\n", "Episode 10, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 95\n", "Episode 10, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 97\n", "Episode 10, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 99\n", "Episode 10, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 101\n", "Episode 10, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 103\n", "Episode 10, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 105\n", "Episode 10, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 107\n", "Episode 10, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 109\n", "Episode 10, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 111\n", "Episode 10, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 113\n", "Episode 10, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 115\n", "Episode 10, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 117\n", "Episode 10, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 119\n", "Episode 10, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 121\n", "Episode 10, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 123\n", "Episode 10, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 125\n", "Episode 10, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 127\n", "Episode 10, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 129\n", "Episode: 10 Best Action: 0 Best evaluation action: 0\n", "Episode: 10 Score: 129 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 
convert: 2 minutes 0 seconds Temperature State: 86.0\n", "Episode 11, Iteration 60, State: [1, 14.0, 121.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 11, Iteration 60, State: [4, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 7\n", "Episode 11, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 10\n", "Episode 11, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 12\n", "Episode 11, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 14\n", "Episode 11, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 17\n", "Episode 11, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 19\n", "Episode 11, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 21\n", "Episode 11, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 23\n", "Episode 11, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 25\n", "Episode 11, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 27\n", "Episode 11, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 30\n", "Episode 11, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 33\n", "Episode 11, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 36\n", "Episode 11, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 40\n", "Episode 11, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 43\n", "Episode 11, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 46\n", "Episode 11, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 48\n", "Episode 11, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 51\n", "Episode 11, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 53\n", "Episode 11, Iteration 60, State: [5, 15.0, 130.0, 90.0], 
Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 55\n", "Episode 11, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 57\n", "Episode 11, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 59\n", "Episode 11, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 11, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 63\n", "Episode 11, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 65\n", "Episode 11, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 67\n", "Episode 11, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 69\n", "Episode 11, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 71\n", "Episode 11, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 73\n", "Episode 11, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 75\n", "Episode 11, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 77\n", "Episode 11, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 80\n", "Episode 11, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 83\n", "Episode 11, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 86\n", "Episode 11, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 88\n", "Episode 11, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 90\n", "Episode 11, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 92\n", "Episode 11, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 94\n", "Episode 11, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 96\n", "Episode 11, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 98\n", 
"Episode 11, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 100\n", "Episode 11, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102\n", "Episode 11, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 104\n", "Episode 11, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 106\n", "Episode 11, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 108\n", "Episode 11, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 110\n", "Episode 11, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 112\n", "Episode 11, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 115\n", "Episode 11, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 118\n", "Episode 11, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 120\n", "Episode 11, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 122\n", "Episode 11, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 124\n", "Episode 11, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 126\n", "Episode 11, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 128\n", "Episode 11, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 130\n", "Episode 11, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 132\n", "Episode 11, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 134\n", "Episode 11, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 137\n", "Episode 11, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 140\n", "Episode: 11 Best Action: 0 Best evaluation action: 2\n", "Episode: 11 Score: 140 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 12, Iteration 60, State: [4, 14.0, 115.0, 
88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3\n", "Episode 12, Iteration 60, State: [1, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 4\n", "Episode 12, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 5\n", "Episode 12, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 6\n", "Episode 12, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 12, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 12\n", "Episode 12, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 12, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 17\n", "Episode 12, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 20\n", "Episode 12, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 23\n", "Episode 12, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 26\n", "Episode 12, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 29\n", "Episode 12, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 32\n", "Episode 12, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 33\n", "Episode 12, Iteration 60, State: [4, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 34\n", "Episode 12, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 36\n", "Episode 12, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 38\n", "Episode 12, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 39\n", "Episode 12, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 41\n", "Episode 12, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 42\n", "Episode 12, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 
44\n", "Episode 12, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 45\n", "Episode 12, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 47\n", "Episode 12, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 48\n", "Episode 12, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 50\n", "Episode 12, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 52\n", "Episode 12, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 55\n", "Episode 12, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 58\n", "Episode 12, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 61\n", "Episode 12, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 62\n", "Episode 12, Iteration 60, State: [4, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 63\n", "Episode 12, Iteration 60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 65\n", "Episode 12, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 66\n", "Episode 12, Iteration 60, State: [4, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 68\n", "Episode 12, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 69\n", "Episode 12, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 71\n", "Episode 12, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 72\n", "Episode 12, Iteration 60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 74\n", "Episode 12, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 76\n", "Episode 12, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 78\n", "Episode 12, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 81\n", "Episode 12, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: 
(5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 84\n", "Episode 12, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 87\n", "Episode 12, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 89\n", "Episode 12, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 92\n", "Episode 12, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 95\n", "Episode 12, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 98\n", "Episode 12, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 101\n", "Episode 12, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 104\n", "Episode 12, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 107\n", "Episode 12, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 110\n", "Episode 12, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 113\n", "Episode 12, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 116\n", "Episode 12, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 117\n", "Episode 12, Iteration 60, State: [2, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 118\n", "Episode 12, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 120\n", "Episode 12, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 121\n", "Episode 12, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 123\n", "Episode 12, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 124\n", "Episode 12, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 125\n", "Episode: 12 Best Action: 1 Best evaluation action: 2\n", "Episode: 12 Score: 125 Best Reward: 3 Gsize State: 4.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 13, Iteration 60, State: [5, 15.0, 115.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3\n", 
"Episode 13, Iteration 60, State: [6, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 13, Iteration 60, State: [1, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 13, Iteration 60, State: [7, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 12\n", "Episode 13, Iteration 60, State: [3, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 13, Iteration 60, State: [4, 15.0, 120.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 18\n", "Episode 13, Iteration 60, State: [6, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 22\n", "Episode 13, Iteration 60, State: [1, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 25\n", "Episode 13, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 28\n", "Episode 13, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 31\n", "Episode 13, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 34\n", "Episode 13, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 36\n", "Episode 13, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 38\n", "Episode 13, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 40\n", "Episode 13, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 43\n", "Episode 13, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 46\n", "Episode 13, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 49\n", "Episode 13, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 52\n", "Episode 13, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 55\n", "Episode 13, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 57\n", "Episode 13, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 59\n", "Episode 13, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 
15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 13, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 63\n", "Episode 13, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 65\n", "Episode 13, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 67\n", "Episode 13, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 69\n", "Episode 13, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 71\n", "Episode 13, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 73\n", "Episode 13, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 75\n", "Episode 13, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 77\n", "Episode 13, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 79\n", "Episode 13, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 81\n", "Episode 13, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 83\n", "Episode 13, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 85\n", "Episode 13, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 87\n", "Episode 13, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 89\n", "Episode 13, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 92\n", "Episode 13, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 95\n", "Episode 13, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 97\n", "Episode 13, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 99\n", "Episode 13, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 101\n", "Episode 13, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 103\n", "Episode 13, Iteration 60, State: [3, 15.0, 130.0, 
90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 105\n", "Episode 13, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 107\n", "Episode 13, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 109\n", "Episode 13, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 112\n", "Episode 13, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 116\n", "Episode 13, Iteration 60, State: [5, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 119\n", "Episode 13, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 122\n", "Episode 13, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 125\n", "Episode 13, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 128\n", "Episode 13, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 131\n", "Episode 13, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 133\n", "Episode 13, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 135\n", "Episode 13, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 138\n", "Episode 13, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 141\n", "Episode 13, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 143\n", "Episode 13, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 145\n", "Episode 13, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 147\n", "Episode 13, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 149\n", "Episode: 13 Best Action: 0 Best evaluation action: 0\n", "Episode: 13 Score: 149 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 14, Iteration 60, State: [4, 16.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 3\n", "Episode 14, Iteration 60, State: [4, 17.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: 
(4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6\n", "Episode 14, Iteration 60, State: [6, 16.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9\n", "Episode 14, Iteration 60, State: [3, 16.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 12\n", "Episode 14, Iteration 60, State: [1, 15.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 15\n", "Episode 14, Iteration 60, State: [5, 16.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 18\n", "Episode 14, Iteration 60, State: [5, 15.0, 120.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 22\n", "Episode 14, Iteration 60, State: [2, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 25\n", "Episode 14, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 29\n", "Episode 14, Iteration 60, State: [2, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 32\n", "Episode 14, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 35\n", "Episode 14, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 37\n", "Episode 14, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 40\n", "Episode 14, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 42\n", "Episode 14, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 45\n", "Episode 14, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 48\n", "Episode 14, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 52\n", "Episode 14, Iteration 60, State: [4, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 55\n", "Episode 14, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 58\n", "Episode 14, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 60\n", "Episode 14, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 63\n", "Episode 14, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 66\n", "Episode 14, Iteration 60, State: [3, 15.0, 125.0, 
88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 68\n", "Episode 14, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 71\n", "Episode 14, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 73\n", "Episode 14, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 76\n", "Episode 14, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 78\n", "Episode 14, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 81\n", "Episode 14, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 83\n", "Episode 14, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 86\n", "Episode 14, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 88\n", "Episode 14, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 91\n", "Episode 14, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 94\n", "Episode 14, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 97\n", "Episode 14, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 99\n", "Episode 14, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 102\n", "Episode 14, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 104\n", "Episode 14, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 107\n", "Episode 14, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 111\n", "Episode 14, Iteration 60, State: [7, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 114\n", "Episode 14, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 117\n", "Episode 14, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 120\n", "Episode 14, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative 
Score: 122\n", "Episode 14, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 125\n", "Episode 14, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 129\n", "Episode 14, Iteration 60, State: [5, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 132\n", "Episode 14, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 135\n", "Episode 14, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 137\n", "Episode 14, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 140\n", "Episode 14, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 143\n", "Episode 14, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 145\n", "Episode 14, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 148\n", "Episode 14, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 151\n", "Episode 14, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 154\n", "Episode 14, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 156\n", "Episode 14, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 159\n", "Episode 14, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 161\n", "Episode 14, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 164\n", "Episode 14, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 167\n", "Episode 14, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 169\n", "Episode: 14 Best Action: 0 Best evaluation action: 2\n", "Episode: 14 Score: 169 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0\n", "Episode 15, Iteration 60, State: [7, 16.0, 122.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2\n", "Episode 15, Iteration 60, State: [6, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4\n", "Episode 15, Iteration 60, State: [6, 15.0, 
120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 6\n", "Episode 15, Iteration 60, State: [4, 15.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 10\n", "Episode 15, Iteration 60, State: [5, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 13\n", "Episode 15, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 16\n", "Episode 15, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 19\n", "Episode 15, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 22\n", "Episode 15, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 25\n", "Episode 15, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 28\n", "Episode 15, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 31\n", "Episode 15, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 34\n", "Episode 15, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 37\n", "Episode 15, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 40\n", "Episode 15, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 43\n", "Episode 15, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 46\n", "Episode 15, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 48\n", "Episode 15, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 50\n", "Episode 15, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 52\n", "Episode 15, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 55\n", "Episode 15, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 57\n", "Episode 15, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 60\n", "Episode 15, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative 
Score: 62\n", "Episode 15, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 65\n", "Episode 15, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 67\n", "Episode 15, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 70\n", "Episode 15, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 73\n", "Episode 15, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 76\n", "Episode 15, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 79\n", "Episode 15, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 82\n", "Episode 15, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 85\n", "Episode 15, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 88\n", "Episode 15, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 89\n", "Episode 15, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 92\n", "Episode 15, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 93\n", "Episode 15, Iteration 60, State: [7, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 94\n", "Episode 15, Iteration 60, State: [1, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 95\n", "Episode 15, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 96\n", "Episode 15, Iteration 60, State: [4, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 97\n", "Episode 15, Iteration 60, State: [6, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 98\n", "Episode 15, Iteration 60, State: [1, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 99\n", "Episode 15, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 101\n", "Episode 15, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 102\n", "Episode 15, Iteration 60, State: [6, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, 
Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 103\n", "Episode 15, Iteration 60, State: [5, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 104\n", "Episode 15, Iteration 60, State: [4, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 106\n", "Episode 15, Iteration 60, State: [7, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 107\n", "Episode 15, Iteration 60, State: [4, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 109\n", "Episode 15, Iteration 60, State: [5, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 110\n", "Episode 15, Iteration 60, State: [6, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 111\n", "Episode 15, Iteration 60, State: [1, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 113\n", "Episode 15, Iteration 60, State: [1, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 115\n", "Episode 15, Iteration 60, State: [3, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 19.0, 145.0, 93.0), Reward: 2, , Cumulative Score: 117\n", "Episode 15, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 119\n", "Episode 15, Iteration 60, State: [5, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 120\n", "Episode 15, Iteration 60, State: [6, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 121\n", "Episode 15, Iteration 60, State: [7, 22.0, 160.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 165.0, 97.0), Reward: 1, , Cumulative Score: 122\n", "Episode 15, Iteration 60, State: [5, 23.0, 165.0, 97.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 24.0, 170.0, 98.0), Reward: 1, , Cumulative Score: 123\n", "Episode 15, Iteration 60, State: [6, 24.0, 170.0, 98.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 25.0, 175.0, 99.0), Reward: 1, , Cumulative Score: 124\n", "Episode 15, Iteration 60, State: [1, 25.0, 175.0, 99.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 24.0, 170.0, 98.0), Reward: 2, , Cumulative Score: 126\n", "Episode: 15 Best Action: 0 Best evaluation action: 2\n", "Episode: 15 Score: 126 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 16, Iteration 60, State: [4, 15.0, 115.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3\n", "Episode 16, Iteration 60, State: [5, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 4\n", "Episode 16, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , 
Cumulative Score: 7\n", "Episode 16, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 10\n", "Episode 16, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 13\n", "Episode 16, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 16\n", "Episode 16, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 19\n", "Episode 16, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 22\n", "Episode 16, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 25\n", "Episode 16, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 26\n", "Episode 16, Iteration 60, State: [3, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 27\n", "Episode 16, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 29\n", "Episode 16, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 31\n", "Episode 16, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 32\n", "Episode 16, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 34\n", "Episode 16, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 35\n", "Episode 16, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 38\n", "Episode 16, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 39\n", "Episode 16, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 41\n", "Episode 16, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 43\n", "Episode 16, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 46\n", "Episode 16, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 49\n", "Episode 16, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 52\n", "Episode 16, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation 
Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 54\n", "Episode 16, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 57\n", "Episode 16, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 60\n", "Episode 16, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 63\n", "Episode 16, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 65\n", "Episode 16, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 67\n", "Episode 16, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 70\n", "Episode 16, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 72\n", "Episode 16, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 75\n", "Episode 16, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 78\n", "Episode 16, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 81\n", "Episode 16, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 84\n", "Episode 16, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 87\n", "Episode 16, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 90\n", "Episode 16, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 93\n", "Episode 16, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 96\n", "Episode 16, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 99\n", "Episode 16, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 100\n", "Episode 16, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 101\n", "Episode 16, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 102\n", "Episode 16, Iteration 60, State: [4, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 103\n", "Episode 16, Iteration 
60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 105\n", "Episode 16, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 106\n", "Episode 16, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 108\n", "Episode 16, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 110\n", "Episode 16, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 111\n", "Episode 16, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 114\n", "Episode 16, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 115\n", "Episode 16, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 117\n", "Episode 16, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 119\n", "Episode 16, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 121\n", "Episode 16, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 124\n", "Episode 16, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 126\n", "Episode 16, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 129\n", "Episode 16, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 132\n", "Episode 16, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 135\n", "Episode 16, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 138\n", "Episode: 16 Best Action: 0 Best evaluation action: 2\n", "Episode: 16 Score: 138 Best Reward: 3 Gsize State: 4.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 17, Iteration 60, State: [1, 14.0, 122.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 89.0), Reward: 1, , Cumulative Score: 1\n", "Episode 17, Iteration 60, State: [1, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 3\n", "Episode 17, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6\n", "Episode 17, Iteration 60, State: [5, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation 
Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 10\n", "Episode 17, Iteration 60, State: [5, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 13\n", "Episode 17, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 17\n", "Episode 17, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 20\n", "Episode 17, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 22\n", "Episode 17, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 24\n", "Episode 17, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 26\n", "Episode 17, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 28\n", "Episode 17, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 30\n", "Episode 17, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 33\n", "Episode 17, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 35\n", "Episode 17, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 37\n", "Episode 17, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 39\n", "Episode 17, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 41\n", "Episode 17, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 43\n", "Episode 17, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 46\n", "Episode 17, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 48\n", "Episode 17, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 50\n", "Episode 17, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 52\n", "Episode 17, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 54\n", "Episode 17, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 56\n", "Episode 17, Iteration 60, 
State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 58\n", "Episode 17, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 60\n", "Episode 17, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 62\n", "Episode 17, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 64\n", "Episode 17, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 66\n", "Episode 17, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 68\n", "Episode 17, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 70\n", "Episode 17, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 72\n", "Episode 17, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 74\n", "Episode 17, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 77\n", "Episode 17, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 81\n", "Episode 17, Iteration 60, State: [1, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 84\n", "Episode 17, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 87\n", "Episode 17, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 90\n", "Episode 17, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 94\n", "Episode 17, Iteration 60, State: [5, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 97\n", "Episode 17, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 99\n", "Episode 17, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 102\n", "Episode 17, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 104\n", "Episode 17, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 106\n", "Episode 17, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), 
Reward: 2, , Cumulative Score: 108\n", "Episode 17, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 110\n", "Episode 17, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 113\n", "Episode 17, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 115\n", "Episode 17, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 117\n", "Episode 17, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 119\n", "Episode 17, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 121\n", "Episode 17, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 124\n", "Episode 17, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 127\n", "Episode 17, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 130\n", "Episode 17, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 133\n", "Episode 17, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 136\n", "Episode 17, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 139\n", "Episode 17, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 142\n", "Episode 17, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 144\n", "Episode 17, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 147\n", "Episode: 17 Best Action: 0 Best evaluation action: 2\n", "Episode: 17 Score: 147 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 18, Iteration 60, State: [2, 16.0, 121.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 18, Iteration 60, State: [4, 16.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 18, Iteration 60, State: [4, 17.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9\n", "Episode 18, Iteration 60, State: [6, 16.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 12\n", "Episode 18, Iteration 
60, State: [7, 16.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 15\n", "Episode 18, Iteration 60, State: [3, 16.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 18\n", "Episode 18, Iteration 60, State: [7, 15.0, 120.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 22\n", "Episode 18, Iteration 60, State: [5, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 25\n", "Episode 18, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 29\n", "Episode 18, Iteration 60, State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 32\n", "Episode 18, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 34\n", "Episode 18, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 37\n", "Episode 18, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 40\n", "Episode 18, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 43\n", "Episode 18, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 47\n", "Episode 18, Iteration 60, State: [2, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 50\n", "Episode 18, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 52\n", "Episode 18, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 55\n", "Episode 18, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 58\n", "Episode 18, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 60\n", "Episode 18, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 63\n", "Episode 18, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 65\n", "Episode 18, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 68\n", "Episode 18, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 71\n", "Episode 18, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), 
Reward: 2, , Cumulative Score: 73\n", "Episode 18, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 76\n", "Episode 18, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 78\n", "Episode 18, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 81\n", "Episode 18, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 84\n", "Episode 18, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 86\n", "Episode 18, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 89\n", "Episode 18, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 91\n", "Episode 18, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 94\n", "Episode 18, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 97\n", "Episode 18, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 99\n", "Episode 18, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 102\n", "Episode 18, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 104\n", "Episode 18, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 107\n", "Episode 18, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 110\n", "Episode 18, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 113\n", "Episode 18, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 116\n", "Episode 18, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 119\n", "Episode 18, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 121\n", "Episode 18, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 124\n", "Episode 18, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 126\n", "Episode 18, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent 
Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 129\n", "Episode 18, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 132\n", "Episode 18, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 135\n", "Episode 18, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 138\n", "Episode 18, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 140\n", "Episode 18, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 143\n", "Episode 18, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 147\n", "Episode 18, Iteration 60, State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 150\n", "Episode 18, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 152\n", "Episode 18, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 155\n", "Episode 18, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 158\n", "Episode 18, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 160\n", "Episode 18, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 163\n", "Episode 18, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 166\n", "Episode 18, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 168\n", "Episode: 18 Best Action: 0 Best evaluation action: 0\n", "Episode: 18 Score: 168 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0\n", "Episode 19, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 3\n", "Episode 19, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 6\n", "Episode 19, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 19, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 12\n", "Episode 19, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 
89.0), Reward: 3, , Cumulative Score: 15\n", "Episode 19, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 18\n", "Episode 19, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 21\n", "Episode 19, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 24\n", "Episode 19, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 27\n", "Episode 19, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 30\n", "Episode 19, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 33\n", "Episode 19, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 36\n", "Episode 19, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 39\n", "Episode 19, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 40\n", "Episode 19, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 43\n", "Episode 19, Iteration 60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 44\n", "Episode 19, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 46\n", "Episode 19, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 47\n", "Episode 19, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 49\n", "Episode 19, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 50\n", "Episode 19, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 52\n", "Episode 19, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 54\n", "Episode 19, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 57\n", "Episode 19, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 60\n", "Episode 19, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 63\n", "Episode 19, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent 
Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 64\n", "Episode 19, Iteration 60, State: [6, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 65\n", "Episode 19, Iteration 60, State: [3, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 66\n", "Episode 19, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 68\n", "Episode 19, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 69\n", "Episode 19, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 70\n", "Episode 19, Iteration 60, State: [4, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 71\n", "Episode 19, Iteration 60, State: [4, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 73\n", "Episode 19, Iteration 60, State: [4, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 75\n", "Episode 19, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 76\n", "Episode 19, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 78\n", "Episode 19, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 79\n", "Episode 19, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 80\n", "Episode 19, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 81\n", "Episode 19, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 83\n", "Episode 19, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 84\n", "Episode 19, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 85\n", "Episode 19, Iteration 60, State: [6, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 86\n", "Episode 19, Iteration 60, State: [5, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 87\n", "Episode 19, Iteration 60, State: [1, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 89\n", "Episode 19, Iteration 60, State: [5, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 90\n", 
"Episode 19, Iteration 60, State: [6, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 91\n", "Episode 19, Iteration 60, State: [1, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 93\n", "Episode 19, Iteration 60, State: [6, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 94\n", "Episode 19, Iteration 60, State: [4, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 96\n", "Episode 19, Iteration 60, State: [6, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 97\n", "Episode 19, Iteration 60, State: [6, 22.0, 160.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 165.0, 97.0), Reward: 1, , Cumulative Score: 98\n", "Episode 19, Iteration 60, State: [2, 23.0, 165.0, 97.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 22.0, 160.0, 96.0), Reward: 2, , Cumulative Score: 100\n", "Episode 19, Iteration 60, State: [1, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 102\n", "Episode 19, Iteration 60, State: [7, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 22.0, 160.0, 96.0), Reward: 2, , Cumulative Score: 104\n", "Episode 19, Iteration 60, State: [6, 22.0, 160.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 165.0, 97.0), Reward: 1, , Cumulative Score: 105\n", "Episode 19, Iteration 60, State: [6, 23.0, 165.0, 97.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 24.0, 170.0, 98.0), Reward: 1, , Cumulative Score: 106\n", "Episode 19, Iteration 60, State: [5, 24.0, 170.0, 98.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 25.0, 175.0, 99.0), Reward: 1, , Cumulative Score: 107\n", "Episode 19, Iteration 60, State: [1, 25.0, 175.0, 99.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 24.0, 170.0, 98.0), Reward: 2, , Cumulative Score: 109\n", "Episode 19, Iteration 60, State: [2, 24.0, 170.0, 98.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 23.0, 165.0, 97.0), Reward: 2, , Cumulative Score: 111\n", "Episode: 19 Best Action: 2 Best evaluation action: 2\n", "Episode: 19 Score: 111 Best Reward: 3 Gsize State: 5.0 Bratio State: 1:16.0 Btime State: 130.0 convert: 2 minutes 10 seconds Temperature State: 90.0\n", "Episode 20, Iteration 60, State: [7, 14.0, 123.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 20, Iteration 60, State: [2, 13.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 20, Iteration 60, State: [1, 14.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9\n", "Episode 20, Iteration 60, State: [1, 13.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 12\n", "Episode 20, Iteration 60, State: [6, 14.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 14\n", "Episode 20, Iteration 60, State: [1, 15.0, 130.0, 91.0], Agent 
Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 16\n", "Episode 20, Iteration 60, State: [6, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 18\n", "Episode 20, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 20\n", "Episode 20, Iteration 60, State: [6, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 22\n", "Episode 20, Iteration 60, State: [6, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 24\n", "Episode 20, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 26\n", "Episode 20, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 28\n", "Episode 20, Iteration 60, State: [6, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 30\n", "Episode 20, Iteration 60, State: [4, 15.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 33\n", "Episode 20, Iteration 60, State: [7, 14.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 35\n", "Episode 20, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 37\n", "Episode 20, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 39\n", "Episode 20, Iteration 60, State: [7, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 41\n", "Episode 20, Iteration 60, State: [2, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 43\n", "Episode 20, Iteration 60, State: [5, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 45\n", "Episode 20, Iteration 60, State: [5, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 47\n", "Episode 20, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 49\n", "Episode 20, Iteration 60, State: [6, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 51\n", "Episode 20, Iteration 60, State: [5, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 53\n", "Episode 20, Iteration 60, State: [4, 15.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 56\n", "Episode 20, Iteration 60, State: [2, 14.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 59\n", 
"Episode 20, Iteration 60, State: [1, 13.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 62\n", "Episode 20, Iteration 60, State: [7, 14.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 64\n", "Episode 20, Iteration 60, State: [1, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 66\n", "Episode 20, Iteration 60, State: [7, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 68\n", "Episode 20, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 70\n", "Episode 20, Iteration 60, State: [5, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 72\n", "Episode 20, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 74\n", "Episode 20, Iteration 60, State: [5, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 76\n", "Episode 20, Iteration 60, State: [1, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 78\n", "Episode 20, Iteration 60, State: [4, 15.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 81\n", "Episode 20, Iteration 60, State: [6, 14.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 83\n", "Episode 20, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 85\n", "Episode 20, Iteration 60, State: [4, 15.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 88\n", "Episode 20, Iteration 60, State: [1, 14.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 91\n", "Episode 20, Iteration 60, State: [6, 13.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 95\n", "Episode 20, Iteration 60, State: [1, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 97\n", "Episode 20, Iteration 60, State: [4, 13.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 101\n", "Episode 20, Iteration 60, State: [1, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 103\n", "Episode 20, Iteration 60, State: [2, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 105\n", "Episode 20, Iteration 60, State: [6, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 107\n", "Episode 20, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: 
(5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 109\n", "Episode 20, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 112\n", "Episode 20, Iteration 60, State: [2, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 115\n", "Episode 20, Iteration 60, State: [7, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 118\n", "Episode 20, Iteration 60, State: [7, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 121\n", "Episode 20, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 124\n", "Episode 20, Iteration 60, State: [5, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 126\n", "Episode 20, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 128\n", "Episode 20, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 130\n", "Episode 20, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 132\n", "Episode 20, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 134\n", "Episode 20, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 136\n", "Episode 20, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 138\n", "Episode 20, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 140\n", "Episode: 20 Best Action: 0 Best evaluation action: 0\n", "Episode: 20 Score: 140 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 21, Iteration 60, State: [5, 16.0, 117.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2\n", "Episode 21, Iteration 60, State: [6, 16.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4\n", "Episode 21, Iteration 60, State: [4, 16.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 17.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 6\n", "Episode 21, Iteration 60, State: [3, 17.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 8\n", "Episode 21, Iteration 60, State: [5, 16.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 10\n", "Episode 21, Iteration 60, State: [5, 16.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 12\n", 
"Episode 21, Iteration 60, State: [6, 16.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 14\n", "Episode 21, Iteration 60, State: [3, 16.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 17\n", "Episode 21, Iteration 60, State: [2, 15.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 20\n", "Episode 21, Iteration 60, State: [4, 16.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 23\n", "Episode 21, Iteration 60, State: [6, 15.0, 120.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 27\n", "Episode 21, Iteration 60, State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 30\n", "Episode 21, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 33\n", "Episode 21, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 35\n", "Episode 21, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 38\n", "Episode 21, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 40\n", "Episode 21, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 43\n", "Episode 21, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 46\n", "Episode 21, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 48\n", "Episode 21, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 51\n", "Episode 21, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 54\n", "Episode 21, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 56\n", "Episode 21, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 59\n", "Episode 21, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 61\n", "Episode 21, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 64\n", "Episode 21, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 67\n", "Episode 21, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 
15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 70\n", "Episode 21, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 72\n", "Episode 21, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 75\n", "Episode 21, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 77\n", "Episode 21, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 80\n", "Episode 21, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 82\n", "Episode 21, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 85\n", "Episode 21, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 87\n", "Episode 21, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 90\n", "Episode 21, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 92\n", "Episode 21, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 95\n", "Episode 21, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 98\n", "Episode 21, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 102\n", "Episode 21, Iteration 60, State: [4, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 105\n", "Episode 21, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 107\n", "Episode 21, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 110\n", "Episode 21, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 113\n", "Episode 21, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 115\n", "Episode 21, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 118\n", "Episode 21, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 121\n", "Episode 21, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 124\n", "Episode 21, Iteration 60, State: [2, 15.0, 
125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 126\n", "Episode 21, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 129\n", "Episode 21, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 131\n", "Episode 21, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 134\n", "Episode 21, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 136\n", "Episode 21, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 139\n", "Episode 21, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 142\n", "Episode 21, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 145\n", "Episode 21, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 148\n", "Episode 21, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 152\n", "Episode 21, Iteration 60, State: [7, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 155\n", "Episode 21, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 159\n", "Episode 21, Iteration 60, State: [7, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 162\n", "Episode: 21 Best Action: 0 Best evaluation action: 0\n", "Episode: 21 Score: 162 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0\n", "Episode 22, Iteration 60, State: [2, 15.0, 118.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 22, Iteration 60, State: [6, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 7\n", "Episode 22, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 10\n", "Episode 22, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 12\n", "Episode 22, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 14\n", "Episode 22, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 16\n", "Episode 22, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: 
(5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 18\n", "Episode 22, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 20\n", "Episode 22, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 23\n", "Episode 22, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 26\n", "Episode 22, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 29\n", "Episode 22, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 32\n", "Episode 22, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 35\n", "Episode 22, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 37\n", "Episode 22, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 39\n", "Episode 22, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 41\n", "Episode 22, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 43\n", "Episode 22, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 45\n", "Episode 22, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 47\n", "Episode 22, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 49\n", "Episode 22, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 51\n", "Episode 22, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 53\n", "Episode 22, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 55\n", "Episode 22, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 57\n", "Episode 22, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 59\n", "Episode 22, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 22, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 63\n", "Episode 22, Iteration 60, State: [4, 15.0, 130.0, 
90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 66\n", "Episode 22, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 69\n", "Episode 22, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 71\n", "Episode 22, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 73\n", "Episode 22, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 75\n", "Episode 22, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 77\n", "Episode 22, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 79\n", "Episode 22, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 81\n", "Episode 22, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 83\n", "Episode 22, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 85\n", "Episode 22, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 87\n", "Episode 22, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 89\n", "Episode 22, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 91\n", "Episode 22, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 93\n", "Episode 22, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 95\n", "Episode 22, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 97\n", "Episode 22, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 100\n", "Episode 22, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102\n", "Episode 22, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 104\n", "Episode 22, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 106\n", "Episode 22, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative 
Score: 109\n", "Episode 22, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 111\n", "Episode 22, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 113\n", "Episode 22, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 115\n", "Episode 22, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 117\n", "Episode 22, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 120\n", "Episode 22, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 123\n", "Episode 22, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 126\n", "Episode 22, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 128\n", "Episode 22, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 130\n", "Episode 22, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 132\n", "Episode 22, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 134\n", "Episode 22, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 136\n", "Episode: 22 Best Action: 0 Best evaluation action: 0\n", "Episode: 22 Score: 136 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 23, Iteration 60, State: [3, 16.0, 121.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 2\n", "Episode 23, Iteration 60, State: [5, 17.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 5\n", "Episode 23, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 8\n", "Episode 23, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 11\n", "Episode 23, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 14\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 17\n", "Episode 23, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 20\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 
88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 23\n", "Episode 23, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 26\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 29\n", "Episode 23, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 32\n", "Episode 23, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 35\n", "Episode 23, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 38\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 41\n", "Episode 23, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 44\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 47\n", "Episode 23, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 50\n", "Episode 23, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 53\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 56\n", "Episode 23, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 59\n", "Episode 23, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 62\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 65\n", "Episode 23, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 68\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 71\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 74\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 77\n", "Episode 23, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 80\n", "Episode 23, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 
83\n", "Episode 23, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 86\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 89\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 92\n", "Episode 23, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 95\n", "Episode 23, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 98\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 101\n", "Episode 23, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 104\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 107\n", "Episode 23, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 110\n", "Episode 23, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 113\n", "Episode 23, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 116\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 119\n", "Episode 23, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 122\n", "Episode 23, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 125\n", "Episode 23, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 128\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 131\n", "Episode 23, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 134\n", "Episode 23, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 137\n", "Episode 23, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 140\n", "Episode 23, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 143\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 
2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 146\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 149\n", "Episode 23, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 152\n", "Episode 23, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 155\n", "Episode 23, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 158\n", "Episode 23, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 161\n", "Episode 23, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 164\n", "Episode 23, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 167\n", "Episode 23, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 170\n", "Episode 23, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 173\n", "Episode 23, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 176\n", "Episode 23, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 179\n", "Episode: 23 Best Action: 0 Best evaluation action: 2\n", "Episode: 23 Score: 179 Best Reward: 3 Gsize State: 3.0 Bratio State: 1:16.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 24, Iteration 60, State: [5, 14.0, 115.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 14.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2\n", "Episode 24, Iteration 60, State: [5, 14.0, 120.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4\n", "Episode 24, Iteration 60, State: [4, 13.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 8\n", "Episode 24, Iteration 60, State: [1, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 10\n", "Episode 24, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 12\n", "Episode 24, Iteration 60, State: [3, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 14\n", "Episode 24, Iteration 60, State: [5, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 16\n", "Episode 24, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative 
Score: 18\n", "Episode 24, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 20\n", "Episode 24, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 22\n", "Episode 24, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 24\n", "Episode 24, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 26\n", "Episode 24, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 28\n", "Episode 24, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 30\n", "Episode 24, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 32\n", "Episode 24, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 34\n", "Episode 24, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 36\n", "Episode 24, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 38\n", "Episode 24, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 41\n", "Episode 24, Iteration 60, State: [4, 14.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 44\n", "Episode 24, Iteration 60, State: [3, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 47\n", "Episode 24, Iteration 60, State: [7, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 50\n", "Episode 24, Iteration 60, State: [7, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 53\n", "Episode 24, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 56\n", "Episode 24, Iteration 60, State: [3, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 59\n", "Episode 24, Iteration 60, State: [3, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 62\n", "Episode 24, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 64\n", "Episode 24, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 66\n", "Episode 24, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next 
State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 68\n", "Episode 24, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 70\n", "Episode 24, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 72\n", "Episode 24, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 74\n", "Episode 24, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 77\n", "Episode 24, Iteration 60, State: [1, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 80\n", "Episode 24, Iteration 60, State: [3, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 83\n", "Episode 24, Iteration 60, State: [5, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 85\n", "Episode 24, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 87\n", "Episode 24, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 89\n", "Episode 24, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 91\n", "Episode 24, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 93\n", "Episode 24, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 95\n", "Episode 24, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 97\n", "Episode 24, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 99\n", "Episode 24, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 101\n", "Episode 24, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 103\n", "Episode 24, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 105\n", "Episode 24, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 107\n", "Episode 24, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 109\n", "Episode 24, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 112\n", "Episode 24, Iteration 60, State: [7, 
14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 114\n", "Episode 24, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 116\n", "Episode 24, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 118\n", "Episode 24, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 120\n", "Episode 24, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 122\n", "Episode 24, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 124\n", "Episode 24, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 126\n", "Episode 24, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 129\n", "Episode 24, Iteration 60, State: [2, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 132\n", "Episode 24, Iteration 60, State: [3, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 135\n", "Episode 24, Iteration 60, State: [4, 14.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 138\n", "Episode: 24 Best Action: 0 Best evaluation action: 2\n", "Episode: 24 Score: 138 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 25, Iteration 60, State: [6, 16.0, 119.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2\n", "Episode 25, Iteration 60, State: [5, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4\n", "Episode 25, Iteration 60, State: [7, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 6\n", "Episode 25, Iteration 60, State: [4, 15.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 10\n", "Episode 25, Iteration 60, State: [3, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 13\n", "Episode 25, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 16\n", "Episode 25, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 19\n", "Episode 25, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 22\n", "Episode 25, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next 
State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 25\n", "Episode 25, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 28\n", "Episode 25, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 30\n", "Episode 25, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 33\n", "Episode 25, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 36\n", "Episode 25, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 39\n", "Episode 25, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 42\n", "Episode 25, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 45\n", "Episode 25, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 46\n", "Episode 25, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 48\n", "Episode 25, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 50\n", "Episode 25, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 53\n", "Episode 25, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 56\n", "Episode 25, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 59\n", "Episode 25, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 62\n", "Episode 25, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 65\n", "Episode 25, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 66\n", "Episode 25, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 67\n", "Episode 25, Iteration 60, State: [3, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 68\n", "Episode 25, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 69\n", "Episode 25, Iteration 60, State: [4, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 70\n", "Episode 25, Iteration 60, State: [2, 19.0, 
145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 72\n", "Episode 25, Iteration 60, State: [4, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 74\n", "Episode 25, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 76\n", "Episode 25, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 78\n", "Episode 25, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 81\n", "Episode 25, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 83\n", "Episode 25, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 86\n", "Episode 25, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 89\n", "Episode 25, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 92\n", "Episode 25, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 94\n", "Episode 25, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 97\n", "Episode 25, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 100\n", "Episode 25, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 103\n", "Episode 25, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 105\n", "Episode 25, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 108\n", "Episode 25, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 110\n", "Episode 25, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 113\n", "Episode 25, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 116\n", "Episode 25, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 119\n", "Episode 25, Iteration 60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 120\n", "Episode 25, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , 
Cumulative Score: 122\n", "Episode 25, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 123\n", "Episode 25, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 126\n", "Episode 25, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 127\n", "Episode 25, Iteration 60, State: [7, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 128\n", "Episode 25, Iteration 60, State: [4, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 129\n", "Episode 25, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 131\n", "Episode 25, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 133\n", "Episode 25, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 134\n", "Episode 25, Iteration 60, State: [4, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 136\n", "Episode 25, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 138\n", "Episode: 25 Best Action: 0 Best evaluation action: 2\n", "Episode: 25 Score: 138 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 26, Iteration 60, State: [5, 15.0, 122.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2\n", "Episode 26, Iteration 60, State: [5, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 4\n", "Episode 26, Iteration 60, State: [2, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 6\n", "Episode 26, Iteration 60, State: [7, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 8\n", "Episode 26, Iteration 60, State: [1, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 10\n", "Episode 26, Iteration 60, State: [2, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 12\n", "Episode 26, Iteration 60, State: [6, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 14\n", "Episode 26, Iteration 60, State: [1, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 16\n", "Episode 26, Iteration 60, State: [5, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 18\n", "Episode 26, Iteration 60, State: [2, 15.0, 
120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 20\n", "Episode 26, Iteration 60, State: [7, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 22\n", "Episode 26, Iteration 60, State: [1, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 24\n", "Episode 26, Iteration 60, State: [6, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 26\n", "Episode 26, Iteration 60, State: [2, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 28\n", "Episode 26, Iteration 60, State: [1, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 30\n", "Episode 26, Iteration 60, State: [6, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 32\n", "Episode 26, Iteration 60, State: [4, 15.0, 120.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 35\n", "Episode 26, Iteration 60, State: [4, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 39\n", "Episode 26, Iteration 60, State: [1, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 42\n", "Episode 26, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 44\n", "Episode 26, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 47\n", "Episode 26, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 50\n", "Episode 26, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 54\n", "Episode 26, Iteration 60, State: [7, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 57\n", "Episode 26, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 60\n", "Episode 26, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 62\n", "Episode 26, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 64\n", "Episode 26, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 66\n", "Episode 26, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 68\n", "Episode 26, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative 
Score: 70\n", "Episode 26, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 72\n", "Episode 26, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 74\n", "Episode 26, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 76\n", "Episode 26, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 79\n", "Episode 26, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 81\n", "Episode 26, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 84\n", "Episode 26, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 86\n", "Episode 26, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 88\n", "Episode 26, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 90\n", "Episode 26, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 92\n", "Episode 26, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 94\n", "Episode 26, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 96\n", "Episode 26, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 99\n", "Episode 26, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 101\n", "Episode 26, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 103\n", "Episode 26, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 105\n", "Episode 26, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 107\n", "Episode 26, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 110\n", "Episode 26, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 112\n", "Episode 26, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 114\n", "Episode 26, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 
2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 116\n", "Episode 26, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 118\n", "Episode 26, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 120\n", "Episode 26, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 122\n", "Episode 26, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 124\n", "Episode 26, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 126\n", "Episode 26, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 128\n", "Episode 26, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 130\n", "Episode 26, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 133\n", "Episode 26, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 136\n", "Episode: 26 Best Action: 0 Best evaluation action: 2\n", "Episode: 26 Score: 136 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 27, Iteration 60, State: [3, 16.0, 124.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 2\n", "Episode 27, Iteration 60, State: [4, 17.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 5\n", "Episode 27, Iteration 60, State: [1, 16.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 8\n", "Episode 27, Iteration 60, State: [2, 15.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 11\n", "Episode 27, Iteration 60, State: [6, 16.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 14\n", "Episode 27, Iteration 60, State: [3, 16.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 17\n", "Episode 27, Iteration 60, State: [3, 15.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 20\n", "Episode 27, Iteration 60, State: [5, 16.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 23\n", "Episode 27, Iteration 60, State: [3, 16.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 87.0), Reward: 3, , Cumulative Score: 26\n", "Episode 27, Iteration 60, State: [7, 15.0, 120.0, 87.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative 
Score: 30\n", "Episode 27, Iteration 60, State: [1, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 32\n", "Episode 27, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 34\n", "Episode 27, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 36\n", "Episode 27, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 38\n", "Episode 27, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 40\n", "Episode 27, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 42\n", "Episode 27, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 44\n", "Episode 27, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 46\n", "Episode 27, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 50\n", "Episode 27, Iteration 60, State: [4, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 52\n", "Episode 27, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 54\n", "Episode 27, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 56\n", "Episode 27, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 58\n", "Episode 27, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 60\n", "Episode 27, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 62\n", "Episode 27, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 64\n", "Episode 27, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 66\n", "Episode 27, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 68\n", "Episode 27, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 70\n", "Episode 27, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 74\n", "Episode 27, Iteration 60, State: [1, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next 
State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 76\n", "Episode 27, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 78\n", "Episode 27, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 80\n", "Episode 27, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 82\n", "Episode 27, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 84\n", "Episode 27, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 86\n", "Episode 27, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 88\n", "Episode 27, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 90\n", "Episode 27, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 92\n", "Episode 27, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 96\n", "Episode 27, Iteration 60, State: [7, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 98\n", "Episode 27, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 100\n", "Episode 27, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 102\n", "Episode 27, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 104\n", "Episode 27, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 106\n", "Episode 27, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 108\n", "Episode 27, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 110\n", "Episode 27, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 112\n", "Episode 27, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 114\n", "Episode 27, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 116\n", "Episode 27, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 118\n", "Episode 27, Iteration 60, State: 
[4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 120\n", "Episode 27, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 124\n", "Episode 27, Iteration 60, State: [3, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 126\n", "Episode 27, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 128\n", "Episode 27, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 130\n", "Episode 27, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 132\n", "Episode 27, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 134\n", "Episode 27, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 136\n", "Episode 27, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 138\n", "Episode: 27 Best Action: 0 Best evaluation action: 0\n", "Episode: 27 Score: 138 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0\n", "Episode 28, Iteration 60, State: [4, 15.0, 118.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 28, Iteration 60, State: [1, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 28, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 28, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 12\n", "Episode 28, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 28, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 18\n", "Episode 28, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 21\n", "Episode 28, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 24\n", "Episode 28, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 27\n", "Episode 28, Iteration 60, State: [2, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 30\n", "Episode 28, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next 
State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 33\n", "Episode 28, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 36\n", "Episode 28, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 39\n", "Episode 28, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 42\n", "Episode 28, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 45\n", "Episode 28, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 48\n", "Episode 28, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 51\n", "Episode 28, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 54\n", "Episode 28, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 57\n", "Episode 28, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 60\n", "Episode 28, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 63\n", "Episode 28, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 66\n", "Episode 28, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 69\n", "Episode 28, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 72\n", "Episode 28, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 75\n", "Episode 28, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 78\n", "Episode 28, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 81\n", "Episode 28, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 84\n", "Episode 28, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 87\n", "Episode 28, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 90\n", "Episode 28, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 93\n", "Episode 28, Iteration 60, State: [5, 15.0, 
125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 96\n", "Episode 28, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 99\n", "Episode 28, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 102\n", "Episode 28, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 105\n", "Episode 28, Iteration 60, State: [5, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 109\n", "Episode 28, Iteration 60, State: [5, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 112\n", "Episode 28, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 114\n", "Episode 28, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 116\n", "Episode 28, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 118\n", "Episode 28, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 120\n", "Episode 28, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 122\n", "Episode 28, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 124\n", "Episode 28, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 126\n", "Episode 28, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 128\n", "Episode 28, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 131\n", "Episode 28, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 134\n", "Episode 28, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 137\n", "Episode 28, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 140\n", "Episode 28, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 144\n", "Episode 28, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 147\n", "Episode 28, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), 
Reward: 2, , Cumulative Score: 149\n", "Episode 28, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 151\n", "Episode 28, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 153\n", "Episode 28, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 155\n", "Episode 28, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 158\n", "Episode 28, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 160\n", "Episode 28, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 162\n", "Episode 28, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 164\n", "Episode 28, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 166\n", "Episode: 28 Best Action: 0 Best evaluation action: 2\n", "Episode: 28 Score: 166 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 29, Iteration 60, State: [2, 14.0, 122.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 29, Iteration 60, State: [2, 13.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 29, Iteration 60, State: [7, 14.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 8\n", "Episode 29, Iteration 60, State: [6, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 10\n", "Episode 29, Iteration 60, State: [7, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 12\n", "Episode 29, Iteration 60, State: [4, 15.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 29, Iteration 60, State: [3, 14.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 18\n", "Episode 29, Iteration 60, State: [2, 13.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 21\n", "Episode 29, Iteration 60, State: [4, 14.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 24\n", "Episode 29, Iteration 60, State: [7, 14.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 26\n", "Episode 29, Iteration 60, State: [1, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 28\n", "Episode 29, Iteration 60, 
State: [4, 15.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 31\n", "Episode 29, Iteration 60, State: [1, 14.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 34\n", "Episode 29, Iteration 60, State: [3, 13.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 37\n", "Episode 29, Iteration 60, State: [5, 14.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 39\n", "Episode 29, Iteration 60, State: [1, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 41\n", "Episode 29, Iteration 60, State: [3, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 43\n", "Episode 29, Iteration 60, State: [2, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 45\n", "Episode 29, Iteration 60, State: [1, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 47\n", "Episode 29, Iteration 60, State: [1, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 49\n", "Episode 29, Iteration 60, State: [6, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 51\n", "Episode 29, Iteration 60, State: [1, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 53\n", "Episode 29, Iteration 60, State: [5, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 55\n", "Episode 29, Iteration 60, State: [4, 15.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 58\n", "Episode 29, Iteration 60, State: [7, 14.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 60\n", "Episode 29, Iteration 60, State: [7, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 62\n", "Episode 29, Iteration 60, State: [2, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 64\n", "Episode 29, Iteration 60, State: [1, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 66\n", "Episode 29, Iteration 60, State: [5, 15.0, 130.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 68\n", "Episode 29, Iteration 60, State: [4, 15.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 71\n", "Episode 29, Iteration 60, State: [4, 14.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 74\n", "Episode 29, Iteration 60, State: [1, 14.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 
3, , Cumulative Score: 77\n", "Episode 29, Iteration 60, State: [2, 13.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 80\n", "Episode 29, Iteration 60, State: [1, 14.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 83\n", "Episode 29, Iteration 60, State: [3, 13.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 86\n", "Episode 29, Iteration 60, State: [1, 14.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 89\n", "Episode 29, Iteration 60, State: [4, 13.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 93\n", "Episode 29, Iteration 60, State: [5, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 95\n", "Episode 29, Iteration 60, State: [6, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 97\n", "Episode 29, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 99\n", "Episode 29, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 101\n", "Episode 29, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 103\n", "Episode 29, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 105\n", "Episode 29, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 107\n", "Episode 29, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 110\n", "Episode 29, Iteration 60, State: [6, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 112\n", "Episode 29, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 114\n", "Episode 29, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 116\n", "Episode 29, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 118\n", "Episode 29, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 120\n", "Episode 29, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 122\n", "Episode 29, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 124\n", "Episode 29, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 
0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 127\n", "Episode 29, Iteration 60, State: [6, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 129\n", "Episode 29, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 131\n", "Episode 29, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 133\n", "Episode 29, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 136\n", "Episode 29, Iteration 60, State: [3, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 139\n", "Episode 29, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 142\n", "Episode 29, Iteration 60, State: [4, 13.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 146\n", "Episode: 29 Best Action: 0 Best evaluation action: 2\n", "Episode: 29 Score: 146 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 30, Iteration 60, State: [1, 14.0, 118.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 4\n", "Episode 30, Iteration 60, State: [7, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 7\n", "Episode 30, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 11\n", "Episode 30, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 14\n", "Episode 30, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 17\n", "Episode 30, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 20\n", "Episode 30, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 22\n", "Episode 30, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 24\n", "Episode 30, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 27\n", "Episode 30, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 30\n", "Episode 30, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 34\n", "Episode 30, Iteration 60, State: [2, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 
3, , Cumulative Score: 37\n", "Episode 30, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 41\n", "Episode 30, Iteration 60, State: [5, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 44\n", "Episode 30, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 47\n", "Episode 30, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 49\n", "Episode 30, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 51\n", "Episode 30, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 53\n", "Episode 30, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 55\n", "Episode 30, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 57\n", "Episode 30, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 59\n", "Episode 30, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 30, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 63\n", "Episode 30, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 65\n", "Episode 30, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 68\n", "Episode 30, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 71\n", "Episode 30, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 73\n", "Episode 30, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 75\n", "Episode 30, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 77\n", "Episode 30, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 79\n", "Episode 30, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 81\n", "Episode 30, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 83\n", "Episode 30, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, 
Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 85\n", "Episode 30, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 87\n", "Episode 30, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 89\n", "Episode 30, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 91\n", "Episode 30, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 93\n", "Episode 30, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 95\n", "Episode 30, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 97\n", "Episode 30, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 100\n", "Episode 30, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 103\n", "Episode 30, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 106\n", "Episode 30, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 109\n", "Episode 30, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 111\n", "Episode 30, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 114\n", "Episode 30, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 118\n", "Episode 30, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 121\n", "Episode 30, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 123\n", "Episode 30, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 125\n", "Episode 30, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 128\n", "Episode 30, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 131\n", "Episode 30, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 133\n", "Episode 30, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 136\n", 
"Episode 30, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 139\n", "Episode 30, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 143\n", "Episode 30, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 146\n", "Episode 30, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 150\n", "Episode 30, Iteration 60, State: [7, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 153\n", "Episode 30, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 156\n", "Episode 30, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 159\n", "Episode: 30 Best Action: 0 Best evaluation action: 2\n", "Episode: 30 Score: 159 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 31, Iteration 60, State: [6, 14.0, 122.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 1\n", "Episode 31, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 4\n", "Episode 31, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 7\n", "Episode 31, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 10\n", "Episode 31, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 13\n", "Episode 31, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 16\n", "Episode 31, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 19\n", "Episode 31, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 22\n", "Episode 31, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 24\n", "Episode 31, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 27\n", "Episode 31, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 29\n", "Episode 31, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 31\n", "Episode 31, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 
1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 33\n", "Episode 31, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 36\n", "Episode 31, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 38\n", "Episode 31, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 40\n", "Episode 31, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 42\n", "Episode 31, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 44\n", "Episode 31, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 47\n", "Episode 31, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 50\n", "Episode 31, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 53\n", "Episode 31, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 54\n", "Episode 31, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 56\n", "Episode 31, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 58\n", "Episode 31, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 60\n", "Episode 31, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 62\n", "Episode 31, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 65\n", "Episode 31, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 68\n", "Episode 31, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 71\n", "Episode 31, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 73\n", "Episode 31, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 75\n", "Episode 31, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 78\n", "Episode 31, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 81\n", "Episode 31, 
Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 84\n", "Episode 31, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 86\n", "Episode 31, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 89\n", "Episode 31, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 92\n", "Episode 31, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 95\n", "Episode 31, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 97\n", "Episode 31, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 100\n", "Episode 31, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 103\n", "Episode 31, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 106\n", "Episode 31, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 107\n", "Episode 31, Iteration 60, State: [2, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 108\n", "Episode 31, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 109\n", "Episode 31, Iteration 60, State: [2, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 110\n", "Episode 31, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 111\n", "Episode 31, Iteration 60, State: [2, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 112\n", "Episode 31, Iteration 60, State: [4, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 114\n", "Episode 31, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 115\n", "Episode 31, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 117\n", "Episode 31, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 119\n", "Episode 31, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 121\n", "Episode 31, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 
15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 123\n", "Episode 31, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 125\n", "Episode 31, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 128\n", "Episode 31, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 131\n", "Episode 31, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 134\n", "Episode 31, Iteration 60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 135\n", "Episode 31, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 138\n", "Episode: 31 Best Action: 2 Best evaluation action: 2\n", "Episode: 31 Score: 138 Best Reward: 3 Gsize State: 5.0 Bratio State: 1:16.0 Btime State: 130.0 convert: 2 minutes 10 seconds Temperature State: 90.0\n", "Episode 32, Iteration 60, State: [3, 15.0, 121.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 4\n", "Episode 32, Iteration 60, State: [3, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 7\n", "Episode 32, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 10\n", "Episode 32, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 14\n", "Episode 32, Iteration 60, State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 17\n", "Episode 32, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 19\n", "Episode 32, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 22\n", "Episode 32, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 24\n", "Episode 32, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 27\n", "Episode 32, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 30\n", "Episode 32, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 34\n", "Episode 32, Iteration 60, State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 37\n", "Episode 32, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 40\n", "Episode 
32, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 43\n", "Episode 32, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 45\n", "Episode 32, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 48\n", "Episode 32, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 51\n", "Episode 32, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 53\n", "Episode 32, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 56\n", "Episode 32, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 59\n", "Episode 32, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 63\n", "Episode 32, Iteration 60, State: [7, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 66\n", "Episode 32, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 69\n", "Episode 32, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 72\n", "Episode 32, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 74\n", "Episode 32, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 77\n", "Episode 32, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 81\n", "Episode 32, Iteration 60, State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 84\n", "Episode 32, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 87\n", "Episode 32, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 89\n", "Episode 32, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 92\n", "Episode 32, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 96\n", "Episode 32, Iteration 60, State: [5, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 99\n", "Episode 32, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 
125.0, 88.0), Reward: 3, , Cumulative Score: 102\n", "Episode 32, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 104\n", "Episode 32, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 107\n", "Episode 32, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 110\n", "Episode 32, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 113\n", "Episode 32, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 116\n", "Episode 32, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 119\n", "Episode 32, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 122\n", "Episode 32, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 125\n", "Episode 32, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 127\n", "Episode 32, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 130\n", "Episode 32, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 132\n", "Episode 32, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 135\n", "Episode 32, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 138\n", "Episode 32, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 140\n", "Episode 32, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 143\n", "Episode 32, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 146\n", "Episode 32, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 148\n", "Episode 32, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 151\n", "Episode 32, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 154\n", "Episode 32, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 157\n", "Episode 32, Iteration 60, State: [6, 
15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 160\n", "Episode 32, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 162\n", "Episode 32, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 165\n", "Episode 32, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 168\n", "Episode 32, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 171\n", "Episode 32, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 173\n", "Episode: 32 Best Action: 0 Best evaluation action: 0\n", "Episode: 32 Score: 173 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0\n", "Episode 33, Iteration 60, State: [4, 15.0, 123.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 33, Iteration 60, State: [7, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 7\n", "Episode 33, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 10\n", "Episode 33, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 12\n", "Episode 33, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 15\n", "Episode 33, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 18\n", "Episode 33, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 21\n", "Episode 33, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 24\n", "Episode 33, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 26\n", "Episode 33, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 29\n", "Episode 33, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 33\n", "Episode 33, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 36\n", "Episode 33, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 39\n", "Episode 33, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: 
(4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 42\n", "Episode 33, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 45\n", "Episode 33, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 48\n", "Episode 33, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 50\n", "Episode 33, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 52\n", "Episode 33, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 54\n", "Episode 33, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 56\n", "Episode 33, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 58\n", "Episode 33, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 61\n", "Episode 33, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 65\n", "Episode 33, Iteration 60, State: [1, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 68\n", "Episode 33, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 71\n", "Episode 33, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 73\n", "Episode 33, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 76\n", "Episode 33, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 78\n", "Episode 33, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 80\n", "Episode 33, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 82\n", "Episode 33, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 84\n", "Episode 33, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 86\n", "Episode 33, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 88\n", "Episode 33, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 90\n", "Episode 33, Iteration 60, State: [5, 15.0, 130.0, 
90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 92\n", "Episode 33, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 94\n", "Episode 33, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 96\n", "Episode 33, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 98\n", "Episode 33, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 101\n", "Episode 33, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 103\n", "Episode 33, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 105\n", "Episode 33, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 107\n", "Episode 33, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 109\n", "Episode 33, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 112\n", "Episode 33, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 115\n", "Episode 33, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 117\n", "Episode 33, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 119\n", "Episode 33, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 121\n", "Episode 33, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 123\n", "Episode 33, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 125\n", "Episode 33, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 127\n", "Episode 33, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 130\n", "Episode 33, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 133\n", "Episode 33, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 135\n", "Episode 33, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , 
Cumulative Score: 137\n", "Episode 33, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 139\n", "Episode 33, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 142\n", "Episode 33, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 146\n", "Episode 33, Iteration 60, State: [5, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 149\n", "Episode 33, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 153\n", "Episode: 33 Best Action: 0 Best evaluation action: 0\n", "Episode: 33 Score: 153 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 34, Iteration 60, State: [4, 14.0, 121.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 34, Iteration 60, State: [1, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 5\n", "Episode 34, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 7\n", "Episode 34, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 9\n", "Episode 34, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 11\n", "Episode 34, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 13\n", "Episode 34, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 15\n", "Episode 34, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 18\n", "Episode 34, Iteration 60, State: [3, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 20\n", "Episode 34, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 23\n", "Episode 34, Iteration 60, State: [7, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 27\n", "Episode 34, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 30\n", "Episode 34, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 33\n", "Episode 34, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 36\n", "Episode 34, Iteration 60, State: [6, 14.0, 
125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 39\n", "Episode 34, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 42\n", "Episode 34, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 46\n", "Episode 34, Iteration 60, State: [2, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 49\n", "Episode 34, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 51\n", "Episode 34, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 53\n", "Episode 34, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 55\n", "Episode 34, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 57\n", "Episode 34, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 59\n", "Episode 34, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 34, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 63\n", "Episode 34, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 65\n", "Episode 34, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 67\n", "Episode 34, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 69\n", "Episode 34, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 71\n", "Episode 34, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 73\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 75\n", "Episode 34, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 77\n", "Episode 34, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 79\n", "Episode 34, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 81\n", "Episode 34, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative 
Score: 83\n", "Episode 34, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 85\n", "Episode 34, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 87\n", "Episode 34, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 89\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 91\n", "Episode 34, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 93\n", "Episode 34, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 95\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 97\n", "Episode 34, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 99\n", "Episode 34, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 101\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 103\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 105\n", "Episode 34, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 107\n", "Episode 34, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 110\n", "Episode 34, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 113\n", "Episode 34, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 115\n", "Episode 34, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 117\n", "Episode 34, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 119\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 121\n", "Episode 34, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 123\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 125\n", "Episode 34, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation 
Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 127\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 129\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 131\n", "Episode 34, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 133\n", "Episode 34, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 135\n", "Episode: 34 Best Action: 0 Best evaluation action: 0\n", "Episode: 34 Score: 135 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 35, Iteration 60, State: [7, 15.0, 118.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 3\n", "Episode 35, Iteration 60, State: [3, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6\n", "Episode 35, Iteration 60, State: [7, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9\n", "Episode 35, Iteration 60, State: [4, 15.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 13\n", "Episode 35, Iteration 60, State: [1, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 16\n", "Episode 35, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 19\n", "Episode 35, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 22\n", "Episode 35, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 25\n", "Episode 35, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 28\n", "Episode 35, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 31\n", "Episode 35, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 34\n", "Episode 35, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 37\n", "Episode 35, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 40\n", "Episode 35, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 43\n", "Episode 35, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative 
Score: 46\n", "Episode 35, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 49\n", "Episode 35, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 52\n", "Episode 35, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 55\n", "Episode 35, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 58\n", "Episode 35, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 61\n", "Episode 35, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 64\n", "Episode 35, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 67\n", "Episode 35, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 70\n", "Episode 35, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 73\n", "Episode 35, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 74\n", "Episode 35, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 75\n", "Episode 35, Iteration 60, State: [6, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 76\n", "Episode 35, Iteration 60, State: [7, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 77\n", "Episode 35, Iteration 60, State: [4, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 79\n", "Episode 35, Iteration 60, State: [4, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 81\n", "Episode 35, Iteration 60, State: [1, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 19.0, 145.0, 93.0), Reward: 2, , Cumulative Score: 83\n", "Episode 35, Iteration 60, State: [6, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 85\n", "Episode 35, Iteration 60, State: [3, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 19.0, 145.0, 93.0), Reward: 2, , Cumulative Score: 87\n", "Episode 35, Iteration 60, State: [3, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 89\n", "Episode 35, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 91\n", "Episode 35, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next 
State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 93\n", "Episode 35, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 95\n", "Episode 35, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 98\n", "Episode 35, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 100\n", "Episode 35, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 102\n", "Episode 35, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 105\n", "Episode 35, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 108\n", "Episode 35, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 111\n", "Episode 35, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 114\n", "Episode 35, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 117\n", "Episode 35, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 120\n", "Episode 35, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 123\n", "Episode 35, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 124\n", "Episode 35, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 125\n", "Episode 35, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 126\n", "Episode 35, Iteration 60, State: [3, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 127\n", "Episode 35, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 128\n", "Episode 35, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 129\n", "Episode 35, Iteration 60, State: [4, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 131\n", "Episode 35, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 133\n", "Episode 35, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 134\n", "Episode 35, Iteration 
60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 136\n", "Episode 35, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 138\n", "Episode 35, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 141\n", "Episode 35, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 144\n", "Episode: 35 Best Action: 0 Best evaluation action: 2\n", "Episode: 35 Score: 144 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 36, Iteration 60, State: [2, 15.0, 122.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 3\n", "Episode 36, Iteration 60, State: [1, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 6\n", "Episode 36, Iteration 60, State: [2, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 36, Iteration 60, State: [4, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 12\n", "Episode 36, Iteration 60, State: [1, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 36, Iteration 60, State: [2, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 18\n", "Episode 36, Iteration 60, State: [5, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 21\n", "Episode 36, Iteration 60, State: [2, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 24\n", "Episode 36, Iteration 60, State: [6, 16.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 27\n", "Episode 36, Iteration 60, State: [5, 17.0, 130.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 30\n", "Episode 36, Iteration 60, State: [7, 18.0, 135.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 31\n", "Episode 36, Iteration 60, State: [6, 19.0, 140.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 145.0, 94.0), Reward: 1, , Cumulative Score: 32\n", "Episode 36, Iteration 60, State: [5, 20.0, 145.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 150.0, 95.0), Reward: 1, , Cumulative Score: 33\n", "Episode 36, Iteration 60, State: [3, 21.0, 150.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 20.0, 145.0, 94.0), Reward: 1, , Cumulative Score: 34\n", "Episode 36, Iteration 60, State: [5, 20.0, 145.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 150.0, 95.0), Reward: 1, , Cumulative Score: 35\n", "Episode 36, Iteration 60, State: [5, 21.0, 150.0, 95.0], Agent Action: 2, Evaluation Action 0, 
Next State: (5.0, 22.0, 155.0, 96.0), Reward: 1, , Cumulative Score: 36\n", "Episode 36, Iteration 60, State: [5, 22.0, 155.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 160.0, 97.0), Reward: 1, , Cumulative Score: 37\n", "Episode 36, Iteration 60, State: [5, 23.0, 160.0, 97.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 24.0, 165.0, 98.0), Reward: 1, , Cumulative Score: 38\n", "Episode 36, Iteration 60, State: [5, 24.0, 165.0, 98.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 25.0, 170.0, 99.0), Reward: 1, , Cumulative Score: 39\n", "Episode 36, Iteration 60, State: [7, 25.0, 170.0, 99.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 26.0, 175.0, 100.0), Reward: 1, , Cumulative Score: 40\n", "Episode 36, Iteration 60, State: [2, 26.0, 175.0, 100.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 25.0, 170.0, 99.0), Reward: 2, , Cumulative Score: 42\n", "Episode 36, Iteration 60, State: [7, 25.0, 170.0, 99.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 26.0, 175.0, 100.0), Reward: 1, , Cumulative Score: 43\n", "Episode 36, Iteration 60, State: [5, 26.0, 175.0, 100.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 27.0, 180.0, 101.0), Reward: 1, , Cumulative Score: 44\n", "Episode 36, Iteration 60, State: [1, 27.0, 180.0, 101.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 26.0, 175.0, 100.0), Reward: 2, , Cumulative Score: 46\n", "Episode 36, Iteration 60, State: [1, 26.0, 175.0, 100.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 25.0, 170.0, 99.0), Reward: 2, , Cumulative Score: 48\n", "Episode 36, Iteration 60, State: [4, 25.0, 170.0, 99.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 24.0, 165.0, 98.0), Reward: 2, , Cumulative Score: 50\n", "Episode 36, Iteration 60, State: [2, 24.0, 165.0, 98.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 23.0, 160.0, 97.0), Reward: 2, , Cumulative Score: 52\n", "Episode 36, Iteration 60, State: [6, 23.0, 160.0, 97.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 24.0, 165.0, 98.0), Reward: 2, , Cumulative Score: 54\n", "Episode 36, Iteration 60, State: [2, 24.0, 165.0, 98.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 23.0, 160.0, 97.0), Reward: 2, , Cumulative Score: 56\n", "Episode 36, Iteration 60, State: [1, 23.0, 160.0, 97.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 22.0, 155.0, 96.0), Reward: 2, , Cumulative Score: 58\n", "Episode 36, Iteration 60, State: [1, 22.0, 155.0, 96.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 21.0, 150.0, 95.0), Reward: 2, , Cumulative Score: 60\n", "Episode 36, Iteration 60, State: [4, 21.0, 150.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 20.0, 145.0, 94.0), Reward: 2, , Cumulative Score: 62\n", "Episode 36, Iteration 60, State: [6, 20.0, 145.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 21.0, 150.0, 95.0), Reward: 2, , Cumulative Score: 64\n", "Episode 36, Iteration 60, State: [7, 21.0, 150.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 155.0, 96.0), Reward: 1, , Cumulative Score: 65\n", "Episode 36, Iteration 60, State: [4, 22.0, 155.0, 96.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 21.0, 150.0, 95.0), Reward: 2, , Cumulative Score: 67\n", "Episode 36, Iteration 60, State: [1, 21.0, 150.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 20.0, 145.0, 94.0), Reward: 2, , Cumulative Score: 69\n", "Episode 36, Iteration 60, 
State: [3, 20.0, 145.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 19.0, 140.0, 93.0), Reward: 2, , Cumulative Score: 71\n", "Episode 36, Iteration 60, State: [7, 19.0, 140.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 20.0, 145.0, 94.0), Reward: 1, , Cumulative Score: 72\n", "Episode 36, Iteration 60, State: [2, 20.0, 145.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 19.0, 140.0, 93.0), Reward: 2, , Cumulative Score: 74\n", "Episode 36, Iteration 60, State: [5, 19.0, 140.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 20.0, 145.0, 94.0), Reward: 1, , Cumulative Score: 75\n", "Episode 36, Iteration 60, State: [7, 20.0, 145.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 150.0, 95.0), Reward: 1, , Cumulative Score: 76\n", "Episode 36, Iteration 60, State: [7, 21.0, 150.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 155.0, 96.0), Reward: 1, , Cumulative Score: 77\n", "Episode 36, Iteration 60, State: [6, 22.0, 155.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 160.0, 97.0), Reward: 1, , Cumulative Score: 78\n", "Episode 36, Iteration 60, State: [3, 23.0, 160.0, 97.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 22.0, 155.0, 96.0), Reward: 2, , Cumulative Score: 80\n", "Episode 36, Iteration 60, State: [1, 22.0, 155.0, 96.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 21.0, 150.0, 95.0), Reward: 2, , Cumulative Score: 82\n", "Episode 36, Iteration 60, State: [1, 21.0, 150.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 20.0, 145.0, 94.0), Reward: 2, , Cumulative Score: 84\n", "Episode 36, Iteration 60, State: [3, 20.0, 145.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 19.0, 140.0, 93.0), Reward: 2, , Cumulative Score: 86\n", "Episode 36, Iteration 60, State: [1, 19.0, 140.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 135.0, 92.0), Reward: 2, , Cumulative Score: 88\n", "Episode 36, Iteration 60, State: [4, 18.0, 135.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 90\n", "Episode 36, Iteration 60, State: [5, 17.0, 130.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 18.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 91\n", "Episode 36, Iteration 60, State: [3, 18.0, 135.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 93\n", "Episode 36, Iteration 60, State: [5, 17.0, 130.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 18.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 94\n", "Episode 36, Iteration 60, State: [3, 18.0, 135.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 96\n", "Episode 36, Iteration 60, State: [2, 17.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 98\n", "Episode 36, Iteration 60, State: [3, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 100\n", "Episode 36, Iteration 60, State: [4, 15.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 104\n", "Episode 36, Iteration 60, State: [3, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), 
Reward: 3, , Cumulative Score: 107\n", "Episode 36, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 110\n", "Episode 36, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 113\n", "Episode 36, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 116\n", "Episode: 36 Best Action: 0 Best evaluation action: 2\n", "Episode: 36 Score: 116 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 37, Iteration 60, State: [7, 15.0, 115.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2\n", "Episode 37, Iteration 60, State: [6, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 4\n", "Episode 37, Iteration 60, State: [3, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 6\n", "Episode 37, Iteration 60, State: [4, 15.0, 120.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 9\n", "Episode 37, Iteration 60, State: [6, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 13\n", "Episode 37, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 16\n", "Episode 37, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 19\n", "Episode 37, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 21\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 23\n", "Episode 37, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 25\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 27\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 29\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 31\n", "Episode 37, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 33\n", "Episode 37, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 35\n", "Episode 37, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 37\n", "Episode 37, Iteration 60, State: [2, 
15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 39\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 41\n", "Episode 37, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 43\n", "Episode 37, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 45\n", "Episode 37, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 47\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 49\n", "Episode 37, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 51\n", "Episode 37, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 53\n", "Episode 37, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 56\n", "Episode 37, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 59\n", "Episode 37, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 37, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 63\n", "Episode 37, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 65\n", "Episode 37, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 67\n", "Episode 37, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 69\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 71\n", "Episode 37, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 73\n", "Episode 37, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 76\n", "Episode 37, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 80\n", "Episode 37, Iteration 60, State: [2, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 83\n", "Episode 37, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , 
Cumulative Score: 87\n", "Episode 37, Iteration 60, State: [7, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 90\n", "Episode 37, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 94\n", "Episode 37, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 97\n", "Episode 37, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 100\n", "Episode 37, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102\n", "Episode 37, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 104\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 106\n", "Episode 37, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 108\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 110\n", "Episode 37, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 112\n", "Episode 37, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 114\n", "Episode 37, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 116\n", "Episode 37, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 118\n", "Episode 37, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 120\n", "Episode 37, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 122\n", "Episode 37, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 125\n", "Episode 37, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 127\n", "Episode 37, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 129\n", "Episode 37, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 131\n", "Episode 37, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 133\n", "Episode 37, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 
1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 135\n", "Episode 37, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 137\n", "Episode 37, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 139\n", "Episode: 37 Best Action: 0 Best evaluation action: 0\n", "Episode: 37 Score: 139 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 38, Iteration 60, State: [3, 14.0, 124.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (5.0, 15.0, 130.0, 89.0), Reward: 1, , Cumulative Score: 1\n", "Episode 38, Iteration 60, State: [4, 15.0, 130.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 4\n", "Episode 38, Iteration 60, State: [7, 16.0, 135.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 7\n", "Episode 38, Iteration 60, State: [3, 17.0, 140.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 10\n", "Episode 38, Iteration 60, State: [4, 16.0, 135.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 12\n", "Episode 38, Iteration 60, State: [3, 15.0, 130.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 38, Iteration 60, State: [6, 16.0, 135.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 18\n", "Episode 38, Iteration 60, State: [1, 17.0, 140.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 21\n", "Episode 38, Iteration 60, State: [2, 16.0, 135.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 23\n", "Episode 38, Iteration 60, State: [1, 15.0, 130.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 26\n", "Episode 38, Iteration 60, State: [2, 16.0, 135.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 28\n", "Episode 38, Iteration 60, State: [4, 15.0, 130.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 31\n", "Episode 38, Iteration 60, State: [4, 16.0, 135.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 33\n", "Episode 38, Iteration 60, State: [6, 15.0, 130.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 35\n", "Episode 38, Iteration 60, State: [6, 15.0, 130.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 37\n", "Episode 38, Iteration 60, State: [6, 15.0, 130.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 39\n", "Episode 38, Iteration 60, State: [7, 15.0, 130.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 130.0, 89.0), Reward: 2, , 
Cumulative Score: 41\n", "Episode 38, Iteration 60, State: [1, 15.0, 130.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 135.0, 90.0), Reward: 3, , Cumulative Score: 44\n", "Episode 38, Iteration 60, State: [7, 16.0, 135.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 140.0, 91.0), Reward: 3, , Cumulative Score: 47\n", "Episode 38, Iteration 60, State: [5, 17.0, 140.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 145.0, 92.0), Reward: 3, , Cumulative Score: 50\n", "Episode 38, Iteration 60, State: [7, 18.0, 145.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 150.0, 93.0), Reward: 1, , Cumulative Score: 51\n", "Episode 38, Iteration 60, State: [7, 19.0, 150.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 155.0, 94.0), Reward: 1, , Cumulative Score: 52\n", "Episode 38, Iteration 60, State: [1, 20.0, 155.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 19.0, 150.0, 93.0), Reward: 2, , Cumulative Score: 54\n", "Episode 38, Iteration 60, State: [1, 19.0, 150.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 145.0, 92.0), Reward: 2, , Cumulative Score: 56\n", "Episode 38, Iteration 60, State: [1, 18.0, 145.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 58\n", "Episode 38, Iteration 60, State: [2, 17.0, 140.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 135.0, 90.0), Reward: 2, , Cumulative Score: 60\n", "Episode 38, Iteration 60, State: [6, 16.0, 135.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 140.0, 91.0), Reward: 1, , Cumulative Score: 61\n", "Episode 38, Iteration 60, State: [6, 17.0, 140.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 145.0, 92.0), Reward: 3, , Cumulative Score: 64\n", "Episode 38, Iteration 60, State: [5, 18.0, 145.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 150.0, 93.0), Reward: 1, , Cumulative Score: 65\n", "Episode 38, Iteration 60, State: [6, 19.0, 150.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 155.0, 94.0), Reward: 1, , Cumulative Score: 66\n", "Episode 38, Iteration 60, State: [5, 20.0, 155.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 160.0, 95.0), Reward: 1, , Cumulative Score: 67\n", "Episode 38, Iteration 60, State: [6, 21.0, 160.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 165.0, 96.0), Reward: 1, , Cumulative Score: 68\n", "Episode 38, Iteration 60, State: [2, 22.0, 165.0, 96.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 21.0, 160.0, 95.0), Reward: 2, , Cumulative Score: 70\n", "Episode 38, Iteration 60, State: [1, 21.0, 160.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 20.0, 155.0, 94.0), Reward: 2, , Cumulative Score: 72\n", "Episode 38, Iteration 60, State: [7, 20.0, 155.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 21.0, 160.0, 95.0), Reward: 2, , Cumulative Score: 74\n", "Episode 38, Iteration 60, State: [7, 21.0, 160.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 165.0, 96.0), Reward: 1, , Cumulative Score: 75\n", "Episode 38, Iteration 60, State: [7, 22.0, 165.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 170.0, 97.0), Reward: 1, , Cumulative Score: 76\n", "Episode 38, Iteration 60, State: [1, 23.0, 170.0, 97.0], Agent Action: 0, Evaluation 
Action 2, Next State: (4.0, 22.0, 165.0, 96.0), Reward: 2, , Cumulative Score: 78\n", "Episode 38, Iteration 60, State: [2, 22.0, 165.0, 96.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 21.0, 160.0, 95.0), Reward: 2, , Cumulative Score: 80\n", "Episode 38, Iteration 60, State: [4, 21.0, 160.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 20.0, 155.0, 94.0), Reward: 2, , Cumulative Score: 82\n", "Episode 38, Iteration 60, State: [7, 20.0, 155.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 21.0, 160.0, 95.0), Reward: 2, , Cumulative Score: 84\n", "Episode 38, Iteration 60, State: [6, 21.0, 160.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 165.0, 96.0), Reward: 1, , Cumulative Score: 85\n", "Episode 38, Iteration 60, State: [3, 22.0, 165.0, 96.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 21.0, 160.0, 95.0), Reward: 2, , Cumulative Score: 87\n", "Episode 38, Iteration 60, State: [4, 21.0, 160.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 20.0, 155.0, 94.0), Reward: 2, , Cumulative Score: 89\n", "Episode 38, Iteration 60, State: [2, 20.0, 155.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 19.0, 150.0, 93.0), Reward: 2, , Cumulative Score: 91\n", "Episode 38, Iteration 60, State: [2, 19.0, 150.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 145.0, 92.0), Reward: 2, , Cumulative Score: 93\n", "Episode 38, Iteration 60, State: [3, 18.0, 145.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 17.0, 140.0, 91.0), Reward: 2, , Cumulative Score: 95\n", "Episode 38, Iteration 60, State: [6, 17.0, 140.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 18.0, 145.0, 92.0), Reward: 1, , Cumulative Score: 96\n", "Episode 38, Iteration 60, State: [7, 18.0, 145.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 150.0, 93.0), Reward: 1, , Cumulative Score: 97\n", "Episode 38, Iteration 60, State: [5, 19.0, 150.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 155.0, 94.0), Reward: 1, , Cumulative Score: 98\n", "Episode 38, Iteration 60, State: [5, 20.0, 155.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 160.0, 95.0), Reward: 1, , Cumulative Score: 99\n", "Episode 38, Iteration 60, State: [5, 21.0, 160.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 165.0, 96.0), Reward: 1, , Cumulative Score: 100\n", "Episode 38, Iteration 60, State: [5, 22.0, 165.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 170.0, 97.0), Reward: 1, , Cumulative Score: 101\n", "Episode 38, Iteration 60, State: [7, 23.0, 170.0, 97.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 24.0, 175.0, 98.0), Reward: 1, , Cumulative Score: 102\n", "Episode 38, Iteration 60, State: [7, 24.0, 175.0, 98.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 25.0, 180.0, 99.0), Reward: 1, , Cumulative Score: 103\n", "Episode 38, Iteration 60, State: [3, 25.0, 180.0, 99.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 24.0, 175.0, 98.0), Reward: 2, , Cumulative Score: 105\n", "Episode 38, Iteration 60, State: [1, 24.0, 175.0, 98.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 23.0, 170.0, 97.0), Reward: 2, , Cumulative Score: 107\n", "Episode 38, Iteration 60, State: [2, 23.0, 170.0, 97.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 22.0, 165.0, 96.0), Reward: 2, , Cumulative Score: 109\n", "Episode 38, 
Iteration 60, State: [3, 22.0, 165.0, 96.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 21.0, 160.0, 95.0), Reward: 2, , Cumulative Score: 111\n", "Episode 38, Iteration 60, State: [4, 21.0, 160.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 20.0, 155.0, 94.0), Reward: 2, , Cumulative Score: 113\n", "Episode: 38 Best Action: 2 Best evaluation action: 2\n", "Episode: 38 Score: 113 Best Reward: 3 Gsize State: 5.0 Bratio State: 1:16.0 Btime State: 135.0 convert: 2 minutes 15 seconds Temperature State: 90.0\n", "Episode 39, Iteration 60, State: [2, 15.0, 117.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 2\n", "Episode 39, Iteration 60, State: [7, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 4\n", "Episode 39, Iteration 60, State: [3, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 6\n", "Episode 39, Iteration 60, State: [7, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 8\n", "Episode 39, Iteration 60, State: [6, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 90.0), Reward: 2, , Cumulative Score: 10\n", "Episode 39, Iteration 60, State: [4, 15.0, 120.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 13\n", "Episode 39, Iteration 60, State: [1, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 16\n", "Episode 39, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 19\n", "Episode 39, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 22\n", "Episode 39, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 25\n", "Episode 39, Iteration 60, State: [4, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 29\n", "Episode 39, Iteration 60, State: [2, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 32\n", "Episode 39, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 35\n", "Episode 39, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 37\n", "Episode 39, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 39\n", "Episode 39, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 41\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 43\n", "Episode 39, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation 
Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 45\n", "Episode 39, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 47\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 49\n", "Episode 39, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 51\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 53\n", "Episode 39, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 55\n", "Episode 39, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 57\n", "Episode 39, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 59\n", "Episode 39, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 61\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 63\n", "Episode 39, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 65\n", "Episode 39, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 67\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 69\n", "Episode 39, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 71\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 73\n", "Episode 39, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 76\n", "Episode 39, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 79\n", "Episode 39, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 82\n", "Episode 39, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 84\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 86\n", "Episode 39, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 88\n", "Episode 39, Iteration 60, 
State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 91\n", "Episode 39, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 95\n", "Episode 39, Iteration 60, State: [4, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 98\n", "Episode 39, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 102\n", "Episode 39, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 105\n", "Episode 39, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 108\n", "Episode 39, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 110\n", "Episode 39, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 112\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 114\n", "Episode 39, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 116\n", "Episode 39, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 119\n", "Episode 39, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 122\n", "Episode 39, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 125\n", "Episode 39, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 127\n", "Episode 39, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 129\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 131\n", "Episode 39, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 133\n", "Episode 39, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 135\n", "Episode 39, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 138\n", "Episode 39, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 140\n", "Episode 39, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 
130.0, 90.0), Reward: 2, , Cumulative Score: 142\n", "Episode 39, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 144\n", "Episode: 39 Best Action: 0 Best evaluation action: 2\n", "Episode: 39 Score: 144 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 40, Iteration 60, State: [1, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 2\n", "Episode 40, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 4\n", "Episode 40, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 6\n", "Episode 40, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 8\n", "Episode 40, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 10\n", "Episode 40, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 12\n", "Episode 40, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 14\n", "Episode 40, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 16\n", "Episode 40, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 18\n", "Episode 40, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 20\n", "Episode 40, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 22\n", "Episode 40, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 24\n", "Episode 40, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 26\n", "Episode 40, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 28\n", "Episode 40, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 30\n", "Episode 40, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 33\n", "Episode 40, Iteration 60, State: [5, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 37\n", "Episode 40, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 40\n", "Episode 40, Iteration 
60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 43\n", "Episode 40, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 45\n", "Episode 40, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 47\n", "Episode 40, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 49\n", "Episode 40, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 51\n", "Episode 40, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 53\n", "Episode 40, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 55\n", "Episode 40, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 58\n", "Episode 40, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 61\n", "Episode 40, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 64\n", "Episode 40, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 67\n", "Episode 40, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 70\n", "Episode 40, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 72\n", "Episode 40, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 74\n", "Episode 40, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 76\n", "Episode 40, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 78\n", "Episode 40, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 80\n", "Episode 40, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 82\n", "Episode 40, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 84\n", "Episode 40, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 87\n", "Episode 40, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), 
Reward: 3, , Cumulative Score: 90\n", "Episode 40, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 92\n", "Episode 40, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 94\n", "Episode 40, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 96\n", "Episode 40, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 98\n", "Episode 40, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 100\n", "Episode 40, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102\n", "Episode 40, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 104\n", "Episode 40, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 106\n", "Episode 40, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 109\n", "Episode 40, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 112\n", "Episode 40, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 114\n", "Episode 40, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 116\n", "Episode 40, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 119\n", "Episode 40, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 122\n", "Episode 40, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 124\n", "Episode 40, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 126\n", "Episode 40, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 128\n", "Episode 40, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 130\n", "Episode 40, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 132\n", "Episode 40, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 134\n", "Episode 40, Iteration 60, State: [6, 15.0, 130.0, 90.0], 
Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 136\n", "Episode: 40 Best Action: 0 Best evaluation action: 2\n", "Episode: 40 Score: 136 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 41, Iteration 60, State: [3, 14.0, 115.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 4\n", "Episode 41, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 7\n", "Episode 41, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 9\n", "Episode 41, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 11\n", "Episode 41, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 13\n", "Episode 41, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 16\n", "Episode 41, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 18\n", "Episode 41, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 20\n", "Episode 41, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 23\n", "Episode 41, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 26\n", "Episode 41, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 30\n", "Episode 41, Iteration 60, State: [4, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 33\n", "Episode 41, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 36\n", "Episode 41, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 39\n", "Episode 41, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 41\n", "Episode 41, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 43\n", "Episode 41, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 45\n", "Episode 41, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 47\n", "Episode 41, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), 
Reward: 2, , Cumulative Score: 49\n", "Episode 41, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 51\n", "Episode 41, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 54\n", "Episode 41, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 58\n", "Episode 41, Iteration 60, State: [2, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 61\n", "Episode 41, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 64\n", "Episode 41, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 67\n", "Episode 41, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 69\n", "Episode 41, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 71\n", "Episode 41, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 74\n", "Episode 41, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 77\n", "Episode 41, Iteration 60, State: [5, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 80\n", "Episode 41, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 83\n", "Episode 41, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 85\n", "Episode 41, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 88\n", "Episode 41, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 92\n", "Episode 41, Iteration 60, State: [1, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 95\n", "Episode 41, Iteration 60, State: [3, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 97\n", "Episode 41, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 99\n", "Episode 41, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 101\n", "Episode 41, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 103\n", "Episode 41, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, 
Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 106\n", "Episode 41, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 109\n", "Episode 41, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 111\n", "Episode 41, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 113\n", "Episode 41, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 115\n", "Episode 41, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 117\n", "Episode 41, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 119\n", "Episode 41, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 121\n", "Episode 41, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 124\n", "Episode 41, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 127\n", "Episode 41, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 130\n", "Episode 41, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 134\n", "Episode 41, Iteration 60, State: [3, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 137\n", "Episode 41, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 139\n", "Episode 41, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 141\n", "Episode 41, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 143\n", "Episode 41, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 146\n", "Episode 41, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 148\n", "Episode 41, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 150\n", "Episode 41, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 152\n", "Episode 41, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 
154\n", "Episode: 41 Best Action: 0 Best evaluation action: 2\n", "Episode: 41 Score: 154 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 42, Iteration 60, State: [3, 14.0, 123.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 42, Iteration 60, State: [6, 13.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 7\n", "Episode 42, Iteration 60, State: [5, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 9\n", "Episode 42, Iteration 60, State: [7, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 11\n", "Episode 42, Iteration 60, State: [6, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 13\n", "Episode 42, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 15\n", "Episode 42, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 17\n", "Episode 42, Iteration 60, State: [1, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 19\n", "Episode 42, Iteration 60, State: [3, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 21\n", "Episode 42, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 23\n", "Episode 42, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 25\n", "Episode 42, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 27\n", "Episode 42, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 29\n", "Episode 42, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 31\n", "Episode 42, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 34\n", "Episode 42, Iteration 60, State: [1, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 37\n", "Episode 42, Iteration 60, State: [3, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 40\n", "Episode 42, Iteration 60, State: [5, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 42\n", "Episode 42, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 45\n", "Episode 42, Iteration 60, State: [3, 14.0, 130.0, 90.0], Agent 
Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 48\n", "Episode 42, Iteration 60, State: [2, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 51\n", "Episode 42, Iteration 60, State: [1, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 54\n", "Episode 42, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 57\n", "Episode 42, Iteration 60, State: [6, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 59\n", "Episode 42, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 61\n", "Episode 42, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 63\n", "Episode 42, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 65\n", "Episode 42, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 67\n", "Episode 42, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 70\n", "Episode 42, Iteration 60, State: [2, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 73\n", "Episode 42, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 76\n", "Episode 42, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 79\n", "Episode 42, Iteration 60, State: [2, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 82\n", "Episode 42, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 85\n", "Episode 42, Iteration 60, State: [4, 13.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 89\n", "Episode 42, Iteration 60, State: [1, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 91\n", "Episode 42, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 93\n", "Episode 42, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 95\n", "Episode 42, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 97\n", "Episode 42, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 99\n", 
"Episode 42, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 101\n", "Episode 42, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 104\n", "Episode 42, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 106\n", "Episode 42, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 108\n", "Episode 42, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 111\n", "Episode 42, Iteration 60, State: [3, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 114\n", "Episode 42, Iteration 60, State: [3, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 117\n", "Episode 42, Iteration 60, State: [6, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 119\n", "Episode 42, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 121\n", "Episode 42, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 123\n", "Episode 42, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 125\n", "Episode 42, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 127\n", "Episode 42, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 130\n", "Episode 42, Iteration 60, State: [6, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 132\n", "Episode 42, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 134\n", "Episode 42, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 137\n", "Episode 42, Iteration 60, State: [4, 14.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 140\n", "Episode 42, Iteration 60, State: [4, 14.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 143\n", "Episode 42, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 145\n", "Episode 42, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 147\n", "Episode: 42 Best Action: 0 Best evaluation action: 0\n", "Episode: 42 Score: 147 Best Reward: 
4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 43, Iteration 60, State: [1, 16.0, 118.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 2\n", "Episode 43, Iteration 60, State: [5, 15.0, 120.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 6\n", "Episode 43, Iteration 60, State: [6, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 8\n", "Episode 43, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 10\n", "Episode 43, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 14\n", "Episode 43, Iteration 60, State: [4, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 16\n", "Episode 43, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 18\n", "Episode 43, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 20\n", "Episode 43, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 24\n", "Episode 43, Iteration 60, State: [2, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 26\n", "Episode 43, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 28\n", "Episode 43, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 32\n", "Episode 43, Iteration 60, State: [3, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 34\n", "Episode 43, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 36\n", "Episode 43, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 38\n", "Episode 43, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 40\n", "Episode 43, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 42\n", "Episode 43, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 44\n", "Episode 43, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 46\n", "Episode 43, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 48\n", 
"Episode 43, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 50\n", "Episode 43, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 52\n", "Episode 43, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 54\n", "Episode 43, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 56\n", "Episode 43, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 58\n", "Episode 43, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 60\n", "Episode 43, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 62\n", "Episode 43, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 64\n", "Episode 43, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 66\n", "Episode 43, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 68\n", "Episode 43, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 72\n", "Episode 43, Iteration 60, State: [7, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 74\n", "Episode 43, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 76\n", "Episode 43, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 78\n", "Episode 43, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 80\n", "Episode 43, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 82\n", "Episode 43, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 84\n", "Episode 43, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 86\n", "Episode 43, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 88\n", "Episode 43, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 90\n", "Episode 43, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 
15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 92\n", "Episode 43, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 94\n", "Episode 43, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 96\n", "Episode 43, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 98\n", "Episode 43, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 100\n", "Episode 43, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 102\n", "Episode 43, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 104\n", "Episode 43, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 106\n", "Episode 43, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 108\n", "Episode 43, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 110\n", "Episode 43, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 114\n", "Episode 43, Iteration 60, State: [1, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 116\n", "Episode 43, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 118\n", "Episode 43, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 120\n", "Episode 43, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 122\n", "Episode 43, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 124\n", "Episode 43, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 126\n", "Episode 43, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 128\n", "Episode 43, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 130\n", "Episode 43, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 132\n", "Episode: 43 Best Action: 0 Best evaluation action: 2\n", "Episode: 43 Score: 132 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0\n", 
"Episode 44, Iteration 60, State: [3, 16.0, 116.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2\n", "Episode 44, Iteration 60, State: [6, 17.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 17.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4\n", "Episode 44, Iteration 60, State: [2, 17.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 7\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 10\n", "Episode 44, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 13\n", "Episode 44, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 16\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 19\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 22\n", "Episode 44, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 25\n", "Episode 44, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 28\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 31\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 34\n", "Episode 44, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 37\n", "Episode 44, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 40\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 43\n", "Episode 44, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 46\n", "Episode 44, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 49\n", "Episode 44, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 52\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 55\n", "Episode 44, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 58\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 
16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 61\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 64\n", "Episode 44, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 67\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 70\n", "Episode 44, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 73\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 76\n", "Episode 44, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 79\n", "Episode 44, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 82\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 85\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 88\n", "Episode 44, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 91\n", "Episode 44, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 94\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 97\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 100\n", "Episode 44, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 103\n", "Episode 44, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 106\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 109\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 112\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 115\n", "Episode 44, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 118\n", "Episode 44, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 121\n", "Episode 44, Iteration 60, State: [1, 16.0, 
120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 124\n", "Episode 44, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 127\n", "Episode 44, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 130\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 133\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 136\n", "Episode 44, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 139\n", "Episode 44, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 142\n", "Episode 44, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 145\n", "Episode 44, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 148\n", "Episode 44, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 151\n", "Episode 44, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 154\n", "Episode 44, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 157\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 160\n", "Episode 44, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 163\n", "Episode 44, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 166\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 169\n", "Episode 44, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 172\n", "Episode 44, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 175\n", "Episode 44, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 178\n", "Episode: 44 Best Action: 0 Best evaluation action: 2\n", "Episode: 44 Score: 178 Best Reward: 3 Gsize State: 3.0 Bratio State: 1:16.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 45, Iteration 60, State: [7, 16.0, 121.0, 90.0], Agent Action: 1, Evaluation Action 0, Next 
State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 45, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 12\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 45, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 18\n", "Episode 45, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 21\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 24\n", "Episode 45, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 27\n", "Episode 45, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 30\n", "Episode 45, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 33\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 36\n", "Episode 45, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 39\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 42\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 45\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 48\n", "Episode 45, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 51\n", "Episode 45, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 54\n", "Episode 45, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 57\n", "Episode 45, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 60\n", "Episode 45, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 63\n", "Episode 45, Iteration 60, State: [6, 16.0, 
120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 66\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 69\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 72\n", "Episode 45, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 75\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 78\n", "Episode 45, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 81\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 84\n", "Episode 45, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 87\n", "Episode 45, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 90\n", "Episode 45, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 93\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 96\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 99\n", "Episode 45, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 102\n", "Episode 45, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 105\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 108\n", "Episode 45, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 111\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 114\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 117\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 120\n", "Episode 45, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 123\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , 
Cumulative Score: 126\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 129\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 132\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 135\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 138\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 141\n", "Episode 45, Iteration 60, State: [4, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 144\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 147\n", "Episode 45, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 150\n", "Episode 45, Iteration 60, State: [5, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 153\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 156\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 159\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 162\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 165\n", "Episode 45, Iteration 60, State: [3, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 168\n", "Episode 45, Iteration 60, State: [6, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 171\n", "Episode 45, Iteration 60, State: [7, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 174\n", "Episode 45, Iteration 60, State: [2, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 177\n", "Episode 45, Iteration 60, State: [1, 16.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 180\n", "Episode: 45 Best Action: 1 Best evaluation action: 0\n", "Episode: 45 Score: 180 Best Reward: 3 Gsize State: 4.0 Bratio State: 1:16.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 90.0\n", "Episode 46, Iteration 60, State: [1, 15.0, 119.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 2\n", "Episode 46, Iteration 60, 
State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 88.0), Reward: 1, , Cumulative Score: 3\n", "Episode 46, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 5\n", "Episode 46, Iteration 60, State: [3, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 1, Next State: (5.0, 15.0, 125.0, 88.0), Reward: 1, , Cumulative Score: 6\n", "Episode 46, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 88.0), Reward: 1, , Cumulative Score: 7\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 9\n", "Episode 46, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 12\n", "Episode 46, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 15\n", "Episode 46, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 17\n", "Episode 46, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 20\n", "Episode 46, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 23\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 25\n", "Episode 46, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 28\n", "Episode 46, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 30\n", "Episode 46, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 33\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 35\n", "Episode 46, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 38\n", "Episode 46, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 41\n", "Episode 46, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 44\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 46\n", "Episode 46, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 49\n", "Episode 46, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , 
Cumulative Score: 53\n", "Episode 46, Iteration 60, State: [5, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 56\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 58\n", "Episode 46, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 61\n", "Episode 46, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 64\n", "Episode 46, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 67\n", "Episode 46, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 69\n", "Episode 46, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 72\n", "Episode 46, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 76\n", "Episode 46, Iteration 60, State: [7, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 79\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 81\n", "Episode 46, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 84\n", "Episode 46, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 87\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 89\n", "Episode 46, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 92\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 94\n", "Episode 46, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 97\n", "Episode 46, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 100\n", "Episode 46, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 102\n", "Episode 46, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 105\n", "Episode 46, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 108\n", "Episode 46, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation 
Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 111\n", "Episode 46, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 114\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 116\n", "Episode 46, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 119\n", "Episode 46, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 122\n", "Episode 46, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 124\n", "Episode 46, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 127\n", "Episode 46, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 130\n", "Episode 46, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 133\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 135\n", "Episode 46, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 138\n", "Episode 46, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 141\n", "Episode 46, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 143\n", "Episode 46, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 146\n", "Episode 46, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 149\n", "Episode 46, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 151\n", "Episode 46, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 154\n", "Episode 46, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 157\n", "Episode: 46 Best Action: 0 Best evaluation action: 2\n", "Episode: 46 Score: 157 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0\n", "Episode 47, Iteration 60, State: [7, 15.0, 121.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 3\n", "Episode 47, Iteration 60, State: [3, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, 
, Cumulative Score: 6\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 12\n", "Episode 47, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 18\n", "Episode 47, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 21\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 24\n", "Episode 47, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 27\n", "Episode 47, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 30\n", "Episode 47, Iteration 60, State: [1, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 33\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 36\n", "Episode 47, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 39\n", "Episode 47, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 42\n", "Episode 47, Iteration 60, State: [2, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 45\n", "Episode 47, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 48\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 51\n", "Episode 47, Iteration 60, State: [4, 15.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 54\n", "Episode 47, Iteration 60, State: [2, 14.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 57\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 60\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 63\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 66\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation 
Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 69\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 72\n", "Episode 47, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 75\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 78\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 81\n", "Episode 47, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 84\n", "Episode 47, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 87\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 90\n", "Episode 47, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 93\n", "Episode 47, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 96\n", "Episode 47, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 99\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 102\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 105\n", "Episode 47, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 108\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 111\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 114\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 117\n", "Episode 47, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 120\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 123\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 126\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 129\n", "Episode 47, 
Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 132\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 135\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 138\n", "Episode 47, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 141\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 144\n", "Episode 47, Iteration 60, State: [6, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 147\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 150\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 153\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 156\n", "Episode 47, Iteration 60, State: [5, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 159\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 162\n", "Episode 47, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 165\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 168\n", "Episode 47, Iteration 60, State: [3, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 171\n", "Episode 47, Iteration 60, State: [7, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 174\n", "Episode 47, Iteration 60, State: [1, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 177\n", "Episode 47, Iteration 60, State: [2, 15.0, 125.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 180\n", "Episode: 47 Best Action: 0 Best evaluation action: 0\n", "Episode: 47 Score: 180 Best Reward: 3 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 89.0\n", "Episode 48, Iteration 60, State: [6, 15.0, 116.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 120.0, 89.0), Reward: 1, , Cumulative Score: 1\n", "Episode 48, Iteration 60, State: [2, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 4\n", "Episode 48, Iteration 60, State: [2, 16.0, 125.0, 90.0], Agent Action: 
0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 7\n", "Episode 48, Iteration 60, State: [6, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 10\n", "Episode 48, Iteration 60, State: [7, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 13\n", "Episode 48, Iteration 60, State: [4, 15.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 17\n", "Episode 48, Iteration 60, State: [2, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 20\n", "Episode 48, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 23\n", "Episode 48, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 26\n", "Episode 48, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 29\n", "Episode 48, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 32\n", "Episode 48, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 35\n", "Episode 48, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 38\n", "Episode 48, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 41\n", "Episode 48, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 42\n", "Episode 48, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 43\n", "Episode 48, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 44\n", "Episode 48, Iteration 60, State: [4, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 45\n", "Episode 48, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 46\n", "Episode 48, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 47\n", "Episode 48, Iteration 60, State: [5, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 48\n", "Episode 48, Iteration 60, State: [6, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 49\n", "Episode 48, Iteration 60, State: [6, 22.0, 160.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 165.0, 97.0), Reward: 1, , Cumulative Score: 50\n", "Episode 48, 
Iteration 60, State: [7, 23.0, 165.0, 97.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 24.0, 170.0, 98.0), Reward: 1, , Cumulative Score: 51\n", "Episode 48, Iteration 60, State: [2, 24.0, 170.0, 98.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 23.0, 165.0, 97.0), Reward: 2, , Cumulative Score: 53\n", "Episode 48, Iteration 60, State: [2, 23.0, 165.0, 97.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 22.0, 160.0, 96.0), Reward: 2, , Cumulative Score: 55\n", "Episode 48, Iteration 60, State: [4, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 57\n", "Episode 48, Iteration 60, State: [3, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 59\n", "Episode 48, Iteration 60, State: [7, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 61\n", "Episode 48, Iteration 60, State: [1, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 63\n", "Episode 48, Iteration 60, State: [1, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 19.0, 145.0, 93.0), Reward: 2, , Cumulative Score: 65\n", "Episode 48, Iteration 60, State: [7, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 67\n", "Episode 48, Iteration 60, State: [2, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 19.0, 145.0, 93.0), Reward: 2, , Cumulative Score: 69\n", "Episode 48, Iteration 60, State: [1, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 71\n", "Episode 48, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 73\n", "Episode 48, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 74\n", "Episode 48, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 75\n", "Episode 48, Iteration 60, State: [6, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 76\n", "Episode 48, Iteration 60, State: [6, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 77\n", "Episode 48, Iteration 60, State: [2, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 79\n", "Episode 48, Iteration 60, State: [4, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 19.0, 145.0, 93.0), Reward: 2, , Cumulative Score: 81\n", "Episode 48, Iteration 60, State: [6, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 83\n", "Episode 48, Iteration 60, State: [5, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 84\n", "Episode 48, Iteration 60, State: [4, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 20.0, 150.0, 
94.0), Reward: 2, , Cumulative Score: 86\n", "Episode 48, Iteration 60, State: [6, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 87\n", "Episode 48, Iteration 60, State: [6, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 88\n", "Episode 48, Iteration 60, State: [4, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 90\n", "Episode 48, Iteration 60, State: [5, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 91\n", "Episode 48, Iteration 60, State: [7, 22.0, 160.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 165.0, 97.0), Reward: 1, , Cumulative Score: 92\n", "Episode 48, Iteration 60, State: [7, 23.0, 165.0, 97.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 24.0, 170.0, 98.0), Reward: 1, , Cumulative Score: 93\n", "Episode 48, Iteration 60, State: [6, 24.0, 170.0, 98.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 25.0, 175.0, 99.0), Reward: 1, , Cumulative Score: 94\n", "Episode 48, Iteration 60, State: [7, 25.0, 175.0, 99.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 26.0, 180.0, 100.0), Reward: 1, , Cumulative Score: 95\n", "Episode 48, Iteration 60, State: [2, 26.0, 180.0, 100.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 25.0, 175.0, 99.0), Reward: 2, , Cumulative Score: 97\n", "Episode 48, Iteration 60, State: [4, 25.0, 175.0, 99.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 24.0, 170.0, 98.0), Reward: 2, , Cumulative Score: 99\n", "Episode 48, Iteration 60, State: [2, 24.0, 170.0, 98.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 23.0, 165.0, 97.0), Reward: 2, , Cumulative Score: 101\n", "Episode 48, Iteration 60, State: [3, 23.0, 165.0, 97.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 22.0, 160.0, 96.0), Reward: 2, , Cumulative Score: 103\n", "Episode 48, Iteration 60, State: [2, 22.0, 160.0, 96.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 21.0, 155.0, 95.0), Reward: 2, , Cumulative Score: 105\n", "Episode 48, Iteration 60, State: [6, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 22.0, 160.0, 96.0), Reward: 2, , Cumulative Score: 107\n", "Episode 48, Iteration 60, State: [6, 22.0, 160.0, 96.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 23.0, 165.0, 97.0), Reward: 1, , Cumulative Score: 108\n", "Episode 48, Iteration 60, State: [6, 23.0, 165.0, 97.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 24.0, 170.0, 98.0), Reward: 1, , Cumulative Score: 109\n", "Episode: 48 Best Action: 0 Best evaluation action: 2\n", "Episode: 48 Score: 109 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 49, Iteration 60, State: [7, 16.0, 122.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 6\n", "Episode 49, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 9\n", "Episode 49, Iteration 
60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 12\n", "Episode 49, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 15\n", "Episode 49, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 18\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 21\n", "Episode 49, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 24\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 27\n", "Episode 49, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 30\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 33\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 36\n", "Episode 49, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 39\n", "Episode 49, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 42\n", "Episode 49, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 45\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 48\n", "Episode 49, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 51\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 54\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 57\n", "Episode 49, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 60\n", "Episode 49, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 63\n", "Episode 49, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 66\n", "Episode 49, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 69\n", "Episode 49, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), 
Reward: 3, , Cumulative Score: 72\n", "Episode 49, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 75\n", "Episode 49, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 78\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 81\n", "Episode 49, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 84\n", "Episode 49, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 87\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 90\n", "Episode 49, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 93\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 96\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 99\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 102\n", "Episode 49, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 105\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 108\n", "Episode 49, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 111\n", "Episode 49, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 114\n", "Episode 49, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 117\n", "Episode 49, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 120\n", "Episode 49, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 123\n", "Episode 49, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 126\n", "Episode 49, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 129\n", "Episode 49, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 132\n", "Episode 49, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent 
Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 135\n", "Episode 49, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 138\n", "Episode 49, Iteration 60, State: [7, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 141\n", "Episode 49, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 144\n", "Episode 49, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 147\n", "Episode 49, Iteration 60, State: [2, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 150\n", "Episode 49, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 153\n", "Episode 49, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 156\n", "Episode 49, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 159\n", "Episode 49, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 162\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 165\n", "Episode 49, Iteration 60, State: [4, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 168\n", "Episode 49, Iteration 60, State: [3, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 171\n", "Episode 49, Iteration 60, State: [1, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 174\n", "Episode 49, Iteration 60, State: [5, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 177\n", "Episode 49, Iteration 60, State: [6, 16.0, 120.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 16.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 180\n", "Episode: 49 Best Action: 1 Best evaluation action: 0\n", "Episode: 49 Score: 180 Best Reward: 3 Gsize State: 4.0 Bratio State: 1:16.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 50, Iteration 60, State: [7, 15.0, 115.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3\n", "Episode 50, Iteration 60, State: [5, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 50, Iteration 60, State: [7, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 9\n", "Episode 50, Iteration 60, State: [7, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 
90.0), Reward: 3, , Cumulative Score: 12\n", "Episode 50, Iteration 60, State: [7, 15.0, 120.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 15\n", "Episode 50, Iteration 60, State: [4, 15.0, 120.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 18\n", "Episode 50, Iteration 60, State: [4, 14.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 22\n", "Episode 50, Iteration 60, State: [4, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 25\n", "Episode 50, Iteration 60, State: [7, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 28\n", "Episode 50, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 30\n", "Episode 50, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 32\n", "Episode 50, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 34\n", "Episode 50, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 36\n", "Episode 50, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 38\n", "Episode 50, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 40\n", "Episode 50, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 42\n", "Episode 50, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 44\n", "Episode 50, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 47\n", "Episode 50, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 49\n", "Episode 50, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 51\n", "Episode 50, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 53\n", "Episode 50, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 55\n", "Episode 50, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 57\n", "Episode 50, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 59\n", "Episode 50, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent 
Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 62\n", "Episode 50, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 66\n", "Episode 50, Iteration 60, State: [5, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 69\n", "Episode 50, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 73\n", "Episode 50, Iteration 60, State: [2, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 76\n", "Episode 50, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 78\n", "Episode 50, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 80\n", "Episode 50, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 82\n", "Episode 50, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 84\n", "Episode 50, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 87\n", "Episode 50, Iteration 60, State: [4, 14.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 91\n", "Episode 50, Iteration 60, State: [6, 13.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 94\n", "Episode 50, Iteration 60, State: [2, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 96\n", "Episode 50, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 98\n", "Episode 50, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 100\n", "Episode 50, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102\n", "Episode 50, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 104\n", "Episode 50, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 106\n", "Episode 50, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 108\n", "Episode 50, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 110\n", "Episode 50, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 112\n", 
"Episode 50, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 114\n", "Episode 50, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 116\n", "Episode 50, Iteration 60, State: [6, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 118\n", "Episode 50, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 120\n", "Episode 50, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 122\n", "Episode 50, Iteration 60, State: [1, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 124\n", "Episode 50, Iteration 60, State: [5, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 126\n", "Episode 50, Iteration 60, State: [7, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 128\n", "Episode 50, Iteration 60, State: [3, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 130\n", "Episode 50, Iteration 60, State: [2, 15.0, 130.0, 90.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 132\n", "Episode 50, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 135\n", "Episode 50, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 138\n", "Episode 50, Iteration 60, State: [1, 14.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 140\n", "Episode 50, Iteration 60, State: [4, 15.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 143\n", "Episode 50, Iteration 60, State: [6, 14.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 146\n", "Episode: 50 Best Action: 0 Best evaluation action: 2\n", "Episode: 50 Score: 146 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:13.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 51, Iteration 60, State: [5, 14.0, 120.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2\n", "Episode 51, Iteration 60, State: [7, 13.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 6\n", "Episode 51, Iteration 60, State: [1, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 8\n", "Episode 51, Iteration 60, State: [3, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 10\n", "Episode 51, Iteration 60, State: [2, 14.0, 130.0, 90.0], Agent 
Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 12\n", "Episode 51, Iteration 60, State: [3, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 14\n", "Episode 51, Iteration 60, State: [1, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 16\n", "Episode 51, Iteration 60, State: [4, 13.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 20\n", "Episode 51, Iteration 60, State: [4, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 22\n", "Episode 51, Iteration 60, State: [4, 13.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 26\n", "Episode 51, Iteration 60, State: [3, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 28\n", "Episode 51, Iteration 60, State: [4, 13.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 32\n", "Episode 51, Iteration 60, State: [1, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 34\n", "Episode 51, Iteration 60, State: [6, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 36\n", "Episode 51, Iteration 60, State: [4, 13.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 40\n", "Episode 51, Iteration 60, State: [3, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 42\n", "Episode 51, Iteration 60, State: [2, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 44\n", "Episode 51, Iteration 60, State: [2, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 46\n", "Episode 51, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 48\n", "Episode 51, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 50\n", "Episode 51, Iteration 60, State: [3, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 52\n", "Episode 51, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 54\n", "Episode 51, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 56\n", "Episode 51, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 58\n", "Episode 51, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 60\n", 
"Episode 51, Iteration 60, State: [2, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 62\n", "Episode 51, Iteration 60, State: [2, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 64\n", "Episode 51, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 66\n", "Episode 51, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 68\n", "Episode 51, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 70\n", "Episode 51, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 73\n", "Episode 51, Iteration 60, State: [2, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 76\n", "Episode 51, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 79\n", "Episode 51, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 81\n", "Episode 51, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 84\n", "Episode 51, Iteration 60, State: [3, 14.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 87\n", "Episode 51, Iteration 60, State: [6, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 13.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 90\n", "Episode 51, Iteration 60, State: [2, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 93\n", "Episode 51, Iteration 60, State: [5, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 95\n", "Episode 51, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 97\n", "Episode 51, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 99\n", "Episode 51, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 101\n", "Episode 51, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 103\n", "Episode 51, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 105\n", "Episode 51, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 107\n", "Episode 51, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: 
(5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 109\n", "Episode 51, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 111\n", "Episode 51, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 113\n", "Episode 51, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 115\n", "Episode 51, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 117\n", "Episode 51, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 119\n", "Episode 51, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 121\n", "Episode 51, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 123\n", "Episode 51, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 125\n", "Episode 51, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 127\n", "Episode 51, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 129\n", "Episode 51, Iteration 60, State: [3, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 131\n", "Episode 51, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 134\n", "Episode 51, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 136\n", "Episode 51, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 138\n", "Episode: 51 Best Action: 0 Best evaluation action: 0\n", "Episode: 51 Score: 138 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 52, Iteration 60, State: [2, 16.0, 117.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 2\n", "Episode 52, Iteration 60, State: [7, 15.0, 120.0, 87.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 6\n", "Episode 52, Iteration 60, State: [7, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 8\n", "Episode 52, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 12\n", "Episode 52, Iteration 60, State: [5, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 14\n", 
"Episode 52, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 16\n", "Episode 52, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 18\n", "Episode 52, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 20\n", "Episode 52, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 22\n", "Episode 52, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 24\n", "Episode 52, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 26\n", "Episode 52, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 28\n", "Episode 52, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 30\n", "Episode 52, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 32\n", "Episode 52, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 36\n", "Episode 52, Iteration 60, State: [5, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 38\n", "Episode 52, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 40\n", "Episode 52, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 42\n", "Episode 52, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 46\n", "Episode 52, Iteration 60, State: [3, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 48\n", "Episode 52, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 50\n", "Episode 52, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 52\n", "Episode 52, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 56\n", "Episode 52, Iteration 60, State: [5, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 58\n", "Episode 52, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 60\n", "Episode 52, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 
15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 62\n", "Episode 52, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 64\n", "Episode 52, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 66\n", "Episode 52, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 68\n", "Episode 52, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 70\n", "Episode 52, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 72\n", "Episode 52, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 74\n", "Episode 52, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 76\n", "Episode 52, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 78\n", "Episode 52, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 80\n", "Episode 52, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 82\n", "Episode 52, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 84\n", "Episode 52, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 86\n", "Episode 52, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 88\n", "Episode 52, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 90\n", "Episode 52, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 92\n", "Episode 52, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 96\n", "Episode 52, Iteration 60, State: [7, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 98\n", "Episode 52, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 100\n", "Episode 52, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 102\n", "Episode 52, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 104\n", "Episode 52, Iteration 60, State: [3, 16.0, 130.0, 
88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 106\n", "Episode 52, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 108\n", "Episode 52, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 110\n", "Episode 52, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 112\n", "Episode 52, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 114\n", "Episode 52, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 116\n", "Episode 52, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 120\n", "Episode 52, Iteration 60, State: [5, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 122\n", "Episode 52, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 124\n", "Episode 52, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 126\n", "Episode 52, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 128\n", "Episode 52, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 130\n", "Episode 52, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 132\n", "Episode 52, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 134\n", "Episode: 52 Best Action: 0 Best evaluation action: 0\n", "Episode: 52 Score: 134 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0\n", "Episode 53, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 1\n", "Episode 53, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 2\n", "Episode 53, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 3\n", "Episode 53, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 6\n", "Episode 53, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 9\n", "Episode 53, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 
16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 12\n", "Episode 53, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 14\n", "Episode 53, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 17\n", "Episode 53, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 20\n", "Episode 53, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 23\n", "Episode 53, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 25\n", "Episode 53, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 28\n", "Episode 53, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 30\n", "Episode 53, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 33\n", "Episode 53, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 36\n", "Episode 53, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 39\n", "Episode 53, Iteration 60, State: [4, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 40\n", "Episode 53, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 42\n", "Episode 53, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 43\n", "Episode 53, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 46\n", "Episode 53, Iteration 60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 47\n", "Episode 53, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 49\n", "Episode 53, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 51\n", "Episode 53, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 54\n", "Episode 53, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 56\n", "Episode 53, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 59\n", "Episode 53, Iteration 60, State: [6, 16.0, 130.0, 
90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 62\n", "Episode 53, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 65\n", "Episode 53, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 68\n", "Episode 53, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 71\n", "Episode 53, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 74\n", "Episode 53, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 77\n", "Episode 53, Iteration 60, State: [4, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 78\n", "Episode 53, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 80\n", "Episode 53, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 82\n", "Episode 53, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 85\n", "Episode 53, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 87\n", "Episode 53, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 89\n", "Episode 53, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 92\n", "Episode 53, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 94\n", "Episode 53, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 97\n", "Episode 53, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 99\n", "Episode 53, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 101\n", "Episode 53, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 103\n", "Episode 53, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 106\n", "Episode 53, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 108\n", "Episode 53, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative 
Score: 111\n", "Episode 53, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 113\n", "Episode 53, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 115\n", "Episode 53, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 118\n", "Episode 53, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 121\n", "Episode 53, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 124\n", "Episode 53, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 125\n", "Episode 53, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 126\n", "Episode 53, Iteration 60, State: [2, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 127\n", "Episode 53, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 128\n", "Episode 53, Iteration 60, State: [5, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 129\n", "Episode 53, Iteration 60, State: [1, 21.0, 155.0, 95.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 20.0, 150.0, 94.0), Reward: 2, , Cumulative Score: 131\n", "Episode 53, Iteration 60, State: [7, 20.0, 150.0, 94.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 21.0, 155.0, 95.0), Reward: 1, , Cumulative Score: 132\n", "Episode 53, Iteration 60, State: [5, 21.0, 155.0, 95.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 22.0, 160.0, 96.0), Reward: 1, , Cumulative Score: 133\n", "Episode: 53 Best Action: 2 Best evaluation action: 2\n", "Episode: 53 Score: 133 Best Reward: 3 Gsize State: 5.0 Bratio State: 1:16.0 Btime State: 130.0 convert: 2 minutes 10 seconds Temperature State: 90.0\n", "Episode 54, Iteration 60, State: [6, 14.0, 116.0, 90.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 14.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 3\n", "Episode 54, Iteration 60, State: [1, 14.0, 120.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 5\n", "Episode 54, Iteration 60, State: [2, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 7\n", "Episode 54, Iteration 60, State: [7, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 9\n", "Episode 54, Iteration 60, State: [2, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 11\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 13\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 
91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 15\n", "Episode 54, Iteration 60, State: [7, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 17\n", "Episode 54, Iteration 60, State: [7, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 19\n", "Episode 54, Iteration 60, State: [5, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 21\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 23\n", "Episode 54, Iteration 60, State: [6, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 25\n", "Episode 54, Iteration 60, State: [5, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 27\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 29\n", "Episode 54, Iteration 60, State: [1, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 31\n", "Episode 54, Iteration 60, State: [5, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 33\n", "Episode 54, Iteration 60, State: [2, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 35\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 37\n", "Episode 54, Iteration 60, State: [6, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 39\n", "Episode 54, Iteration 60, State: [6, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 41\n", "Episode 54, Iteration 60, State: [1, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 43\n", "Episode 54, Iteration 60, State: [7, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 45\n", "Episode 54, Iteration 60, State: [7, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 47\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 49\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 51\n", "Episode 54, Iteration 60, State: [6, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 53\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 
55\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 57\n", "Episode 54, Iteration 60, State: [2, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 59\n", "Episode 54, Iteration 60, State: [1, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 61\n", "Episode 54, Iteration 60, State: [6, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 63\n", "Episode 54, Iteration 60, State: [5, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 65\n", "Episode 54, Iteration 60, State: [1, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 67\n", "Episode 54, Iteration 60, State: [7, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 69\n", "Episode 54, Iteration 60, State: [2, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 71\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 73\n", "Episode 54, Iteration 60, State: [3, 15.0, 125.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 91.0), Reward: 2, , Cumulative Score: 75\n", "Episode 54, Iteration 60, State: [4, 15.0, 125.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 120.0, 90.0), Reward: 3, , Cumulative Score: 78\n", "Episode 54, Iteration 60, State: [6, 14.0, 120.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 81\n", "Episode 54, Iteration 60, State: [2, 13.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 84\n", "Episode 54, Iteration 60, State: [7, 14.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 86\n", "Episode 54, Iteration 60, State: [4, 15.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 89\n", "Episode 54, Iteration 60, State: [1, 14.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 13.0, 120.0, 89.0), Reward: 3, , Cumulative Score: 92\n", "Episode 54, Iteration 60, State: [5, 13.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 12.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 96\n", "Episode 54, Iteration 60, State: [5, 12.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 98\n", "Episode 54, Iteration 60, State: [5, 13.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 13.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 100\n", "Episode 54, Iteration 60, State: [1, 13.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 14.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 102\n", "Episode 54, Iteration 60, State: [7, 14.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next 
State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 104\n", "Episode 54, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 106\n", "Episode 54, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 108\n", "Episode 54, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 110\n", "Episode 54, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 112\n", "Episode 54, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 114\n", "Episode 54, Iteration 60, State: [2, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 116\n", "Episode 54, Iteration 60, State: [7, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 118\n", "Episode 54, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 120\n", "Episode 54, Iteration 60, State: [1, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 122\n", "Episode 54, Iteration 60, State: [6, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 124\n", "Episode 54, Iteration 60, State: [5, 15.0, 135.0, 91.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 126\n", "Episode 54, Iteration 60, State: [4, 15.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 14.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 129\n", "Episode: 54 Best Action: 0 Best evaluation action: 2\n", "Episode: 54 Score: 129 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:12.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 55, Iteration 60, State: [7, 16.0, 119.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2\n", "Episode 55, Iteration 60, State: [6, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 4\n", "Episode 55, Iteration 60, State: [1, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 7\n", "Episode 55, Iteration 60, State: [3, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 9\n", "Episode 55, Iteration 60, State: [7, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 11\n", "Episode 55, Iteration 60, State: [3, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 14\n", "Episode 55, Iteration 60, State: [6, 16.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 
17\n", "Episode 55, Iteration 60, State: [6, 17.0, 130.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 20\n", "Episode 55, Iteration 60, State: [2, 18.0, 135.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 130.0, 91.0), Reward: 1, , Cumulative Score: 21\n", "Episode 55, Iteration 60, State: [7, 17.0, 130.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 24\n", "Episode 55, Iteration 60, State: [2, 18.0, 135.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 130.0, 91.0), Reward: 1, , Cumulative Score: 25\n", "Episode 55, Iteration 60, State: [1, 17.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 27\n", "Episode 55, Iteration 60, State: [2, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 29\n", "Episode 55, Iteration 60, State: [2, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 32\n", "Episode 55, Iteration 60, State: [7, 16.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 35\n", "Episode 55, Iteration 60, State: [5, 17.0, 130.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 38\n", "Episode 55, Iteration 60, State: [1, 18.0, 135.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 130.0, 91.0), Reward: 1, , Cumulative Score: 39\n", "Episode 55, Iteration 60, State: [1, 17.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 41\n", "Episode 55, Iteration 60, State: [5, 16.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 17.0, 130.0, 91.0), Reward: 1, , Cumulative Score: 42\n", "Episode 55, Iteration 60, State: [7, 17.0, 130.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 45\n", "Episode 55, Iteration 60, State: [2, 18.0, 135.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 130.0, 91.0), Reward: 1, , Cumulative Score: 46\n", "Episode 55, Iteration 60, State: [5, 17.0, 130.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 49\n", "Episode 55, Iteration 60, State: [6, 18.0, 135.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 50\n", "Episode 55, Iteration 60, State: [1, 19.0, 140.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 51\n", "Episode 55, Iteration 60, State: [5, 18.0, 135.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 52\n", "Episode 55, Iteration 60, State: [7, 19.0, 140.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 145.0, 94.0), Reward: 1, , Cumulative Score: 53\n", "Episode 55, Iteration 60, State: [3, 20.0, 145.0, 94.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 19.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 54\n", "Episode 55, Iteration 60, State: [1, 19.0, 140.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: 
(3.0, 18.0, 135.0, 92.0), Reward: 2, , Cumulative Score: 56\n", "Episode 55, Iteration 60, State: [5, 18.0, 135.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 19.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 57\n", "Episode 55, Iteration 60, State: [7, 19.0, 140.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 145.0, 94.0), Reward: 1, , Cumulative Score: 58\n", "Episode 55, Iteration 60, State: [2, 20.0, 145.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 19.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 59\n", "Episode 55, Iteration 60, State: [1, 19.0, 140.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 135.0, 92.0), Reward: 2, , Cumulative Score: 61\n", "Episode 55, Iteration 60, State: [2, 18.0, 135.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 130.0, 91.0), Reward: 2, , Cumulative Score: 63\n", "Episode 55, Iteration 60, State: [1, 17.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 65\n", "Episode 55, Iteration 60, State: [3, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 67\n", "Episode 55, Iteration 60, State: [3, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 70\n", "Episode 55, Iteration 60, State: [2, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 72\n", "Episode 55, Iteration 60, State: [1, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 75\n", "Episode 55, Iteration 60, State: [4, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 77\n", "Episode 55, Iteration 60, State: [6, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 79\n", "Episode 55, Iteration 60, State: [4, 15.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 83\n", "Episode 55, Iteration 60, State: [3, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 86\n", "Episode 55, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 89\n", "Episode 55, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 92\n", "Episode 55, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 95\n", "Episode 55, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 98\n", "Episode 55, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 101\n", "Episode 55, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 103\n", "Episode 55, Iteration 60, State: [2, 15.0, 
125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 106\n", "Episode 55, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 108\n", "Episode 55, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 110\n", "Episode 55, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 113\n", "Episode 55, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 116\n", "Episode 55, Iteration 60, State: [5, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 119\n", "Episode 55, Iteration 60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 120\n", "Episode 55, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 123\n", "Episode 55, Iteration 60, State: [3, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 124\n", "Episode 55, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 127\n", "Episode 55, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 128\n", "Episode 55, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 130\n", "Episode: 55 Best Action: 0 Best evaluation action: 2\n", "Episode: 55 Score: 130 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 56, Iteration 60, State: [4, 15.0, 117.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 4\n", "Episode 56, Iteration 60, State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 7\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 10\n", "Episode 56, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 14\n", "Episode 56, Iteration 60, State: [7, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 17\n", "Episode 56, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 19\n", "Episode 56, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 22\n", "Episode 56, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: 
(5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 24\n", "Episode 56, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 27\n", "Episode 56, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 30\n", "Episode 56, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 32\n", "Episode 56, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 35\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 38\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 41\n", "Episode 56, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 43\n", "Episode 56, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 46\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 49\n", "Episode 56, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 51\n", "Episode 56, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 54\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 57\n", "Episode 56, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 59\n", "Episode 56, Iteration 60, State: [5, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 62\n", "Episode 56, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 64\n", "Episode 56, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 67\n", "Episode 56, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 70\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 73\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 76\n", "Episode 56, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 78\n", "Episode 56, Iteration 60, State: [4, 16.0, 130.0, 
89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 81\n", "Episode 56, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 83\n", "Episode 56, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 86\n", "Episode 56, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 88\n", "Episode 56, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 91\n", "Episode 56, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 93\n", "Episode 56, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 96\n", "Episode 56, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 99\n", "Episode 56, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 102\n", "Episode 56, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 104\n", "Episode 56, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 107\n", "Episode 56, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 111\n", "Episode 56, Iteration 60, State: [1, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 114\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 117\n", "Episode 56, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 121\n", "Episode 56, Iteration 60, State: [3, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 124\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 127\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 130\n", "Episode 56, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 134\n", "Episode 56, Iteration 60, State: [6, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 137\n", "Episode 56, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , 
Cumulative Score: 139\n", "Episode 56, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 142\n", "Episode 56, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 144\n", "Episode 56, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 147\n", "Episode 56, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 151\n", "Episode 56, Iteration 60, State: [4, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 154\n", "Episode 56, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 157\n", "Episode 56, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 160\n", "Episode 56, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 163\n", "Episode 56, Iteration 60, State: [6, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 166\n", "Episode 56, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 168\n", "Episode 56, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 171\n", "Episode: 56 Best Action: 0 Best evaluation action: 2\n", "Episode: 56 Score: 171 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0\n", "Episode 57, Iteration 60, State: [7, 16.0, 117.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 2\n", "Episode 57, Iteration 60, State: [4, 15.0, 120.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 87.0), Reward: 2, , Cumulative Score: 4\n", "Episode 57, Iteration 60, State: [7, 15.0, 120.0, 87.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 8\n", "Episode 57, Iteration 60, State: [5, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 10\n", "Episode 57, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 12\n", "Episode 57, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 14\n", "Episode 57, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 16\n", "Episode 57, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 18\n", "Episode 57, Iteration 60, State: [3, 
16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 20\n", "Episode 57, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 24\n", "Episode 57, Iteration 60, State: [7, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 26\n", "Episode 57, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 28\n", "Episode 57, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 30\n", "Episode 57, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 34\n", "Episode 57, Iteration 60, State: [2, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 36\n", "Episode 57, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 38\n", "Episode 57, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 40\n", "Episode 57, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 42\n", "Episode 57, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 44\n", "Episode 57, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 46\n", "Episode 57, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 48\n", "Episode 57, Iteration 60, State: [1, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 50\n", "Episode 57, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 52\n", "Episode 57, Iteration 60, State: [6, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 54\n", "Episode 57, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 56\n", "Episode 57, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 58\n", "Episode 57, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 62\n", "Episode 57, Iteration 60, State: [7, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 64\n", "Episode 57, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , 
Cumulative Score: 66\n", "Episode 57, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 70\n", "Episode 57, Iteration 60, State: [7, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 72\n", "Episode 57, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 74\n", "Episode 57, Iteration 60, State: [2, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 76\n", "Episode 57, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 78\n", "Episode 57, Iteration 60, State: [2, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 80\n", "Episode 57, Iteration 60, State: [5, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 82\n", "Episode 57, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 84\n", "Episode 57, Iteration 60, State: [7, 16.0, 130.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 86\n", "Episode 57, Iteration 60, State: [3, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 88\n", "Episode 57, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 90\n", "Episode 57, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 92\n", "Episode 57, Iteration 60, State: [1, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 94\n", "Episode 57, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 98\n", "Episode 57, Iteration 60, State: [5, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 100\n", "Episode 57, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 102\n", "Episode 57, Iteration 60, State: [3, 15.0, 125.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 88.0), Reward: 2, , Cumulative Score: 104\n", "Episode 57, Iteration 60, State: [4, 16.0, 130.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 106\n", "Episode 57, Iteration 60, State: [6, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 108\n", "Episode 57, Iteration 60, State: [7, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 110\n", "Episode 57, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, 
Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 112\n", "Episode 57, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 114\n", "Episode 57, Iteration 60, State: [5, 15.0, 125.0, 87.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 116\n", "Episode 57, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 120\n", "Episode 57, Iteration 60, State: [5, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 122\n", "Episode 57, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 126\n", "Episode 57, Iteration 60, State: [1, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 128\n", "Episode 57, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 132\n", "Episode 57, Iteration 60, State: [7, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 134\n", "Episode 57, Iteration 60, State: [4, 15.0, 125.0, 87.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 86.0), Reward: 4, , Cumulative Score: 138\n", "Episode 57, Iteration 60, State: [4, 14.0, 120.0, 86.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 87.0), Reward: 2, , Cumulative Score: 140\n", "Episode: 57 Best Action: 0 Best evaluation action: 0\n", "Episode: 57 Score: 140 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 86.0\n", "Episode 58, Iteration 60, State: [3, 16.0, 123.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 2\n", "Episode 58, Iteration 60, State: [2, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 5\n", "Episode 58, Iteration 60, State: [7, 16.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 130.0, 91.0), Reward: 3, , Cumulative Score: 8\n", "Episode 58, Iteration 60, State: [5, 17.0, 130.0, 91.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 18.0, 135.0, 92.0), Reward: 3, , Cumulative Score: 11\n", "Episode 58, Iteration 60, State: [7, 18.0, 135.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 12\n", "Episode 58, Iteration 60, State: [4, 19.0, 140.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 13\n", "Episode 58, Iteration 60, State: [5, 18.0, 135.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 140.0, 93.0), Reward: 1, , Cumulative Score: 14\n", "Episode 58, Iteration 60, State: [3, 19.0, 140.0, 93.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 18.0, 135.0, 92.0), Reward: 1, , Cumulative Score: 15\n", "Episode 58, Iteration 60, State: [1, 18.0, 135.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 130.0, 91.0), Reward: 
2, , Cumulative Score: 17\n", "Episode 58, Iteration 60, State: [1, 17.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 19\n", "Episode 58, Iteration 60, State: [7, 16.0, 125.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 130.0, 91.0), Reward: 1, , Cumulative Score: 20\n", "Episode 58, Iteration 60, State: [1, 17.0, 130.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 125.0, 90.0), Reward: 2, , Cumulative Score: 22\n", "Episode 58, Iteration 60, State: [2, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 24\n", "Episode 58, Iteration 60, State: [7, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 26\n", "Episode 58, Iteration 60, State: [2, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 29\n", "Episode 58, Iteration 60, State: [4, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 31\n", "Episode 58, Iteration 60, State: [3, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 34\n", "Episode 58, Iteration 60, State: [4, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 36\n", "Episode 58, Iteration 60, State: [1, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 39\n", "Episode 58, Iteration 60, State: [4, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 41\n", "Episode 58, Iteration 60, State: [6, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 43\n", "Episode 58, Iteration 60, State: [1, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 46\n", "Episode 58, Iteration 60, State: [4, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 48\n", "Episode 58, Iteration 60, State: [6, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 50\n", "Episode 58, Iteration 60, State: [7, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 52\n", "Episode 58, Iteration 60, State: [7, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 54\n", "Episode 58, Iteration 60, State: [3, 15.0, 120.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 125.0, 90.0), Reward: 3, , Cumulative Score: 57\n", "Episode 58, Iteration 60, State: [1, 16.0, 125.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 59\n", "Episode 58, Iteration 60, State: [7, 15.0, 120.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 61\n", "Episode 58, Iteration 60, State: [5, 15.0, 120.0, 89.0], Agent Action: 1, 
Evaluation Action 2, Next State: (3.0, 15.0, 120.0, 89.0), Reward: 2, , Cumulative Score: 63\n", "Episode 58, Iteration 60, State: [4, 15.0, 120.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 88.0), Reward: 4, , Cumulative Score: 67\n", "Episode 58, Iteration 60, State: [7, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 70\n", "Episode 58, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 73\n", "Episode 58, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 76\n", "Episode 58, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 79\n", "Episode 58, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 82\n", "Episode 58, Iteration 60, State: [4, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 85\n", "Episode 58, Iteration 60, State: [5, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 88\n", "Episode 58, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 91\n", "Episode 58, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 93\n", "Episode 58, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 95\n", "Episode 58, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 98\n", "Episode 58, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 101\n", "Episode 58, Iteration 60, State: [3, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 104\n", "Episode 58, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 106\n", "Episode 58, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 109\n", "Episode 58, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 111\n", "Episode 58, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 114\n", "Episode 58, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 117\n", "Episode 58, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 120\n", "Episode 
58, Iteration 60, State: [6, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 121\n", "Episode 58, Iteration 60, State: [5, 19.0, 145.0, 93.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 20.0, 150.0, 94.0), Reward: 1, , Cumulative Score: 122\n", "Episode 58, Iteration 60, State: [1, 20.0, 150.0, 94.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 123\n", "Episode 58, Iteration 60, State: [2, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 18.0, 140.0, 92.0), Reward: 2, , Cumulative Score: 125\n", "Episode 58, Iteration 60, State: [1, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 127\n", "Episode 58, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 129\n", "Episode 58, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 131\n", "Episode 58, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 133\n", "Episode 58, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 136\n", "Episode 58, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 138\n", "Episode: 58 Best Action: 0 Best evaluation action: 2\n", "Episode: 58 Score: 138 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 59, Iteration 60, State: [7, 15.0, 115.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 14.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3\n", "Episode 59, Iteration 60, State: [7, 14.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 4\n", "Episode 59, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (5.0, 15.0, 125.0, 89.0), Reward: 1, , Cumulative Score: 5\n", "Episode 59, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 8\n", "Episode 59, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 11\n", "Episode 59, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 14\n", "Episode 59, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 17\n", "Episode 59, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 20\n", "Episode 59, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 23\n", "Episode 59, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, 
Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 26\n", "Episode 59, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 29\n", "Episode 59, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 32\n", "Episode 59, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 35\n", "Episode 59, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 38\n", "Episode 59, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 41\n", "Episode 59, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 44\n", "Episode 59, Iteration 60, State: [2, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 47\n", "Episode 59, Iteration 60, State: [1, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 50\n", "Episode 59, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 53\n", "Episode 59, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 56\n", "Episode 59, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 59\n", "Episode 59, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 62\n", "Episode 59, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 65\n", "Episode 59, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 68\n", "Episode 59, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 71\n", "Episode 59, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 74\n", "Episode 59, Iteration 60, State: [5, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 77\n", "Episode 59, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 80\n", "Episode 59, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 83\n", "Episode 59, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 86\n", "Episode 59, 
Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 89\n", "Episode 59, Iteration 60, State: [4, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 92\n", "Episode 59, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 95\n", "Episode 59, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 98\n", "Episode 59, Iteration 60, State: [4, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 101\n", "Episode 59, Iteration 60, State: [7, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 89.0), Reward: 3, , Cumulative Score: 104\n", "Episode 59, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 107\n", "Episode 59, Iteration 60, State: [6, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 110\n", "Episode 59, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 113\n", "Episode 59, Iteration 60, State: [7, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 114\n", "Episode 59, Iteration 60, State: [2, 19.0, 145.0, 93.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 18.0, 140.0, 92.0), Reward: 1, , Cumulative Score: 115\n", "Episode 59, Iteration 60, State: [2, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 17.0, 135.0, 91.0), Reward: 2, , Cumulative Score: 117\n", "Episode 59, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 119\n", "Episode 59, Iteration 60, State: [2, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 121\n", "Episode 59, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 123\n", "Episode 59, Iteration 60, State: [1, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 126\n", "Episode 59, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 129\n", "Episode 59, Iteration 60, State: [1, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 132\n", "Episode 59, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 135\n", "Episode 59, Iteration 60, State: [7, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 138\n", "Episode 59, Iteration 60, State: [4, 18.0, 140.0, 92.0], Agent Action: 0, Evaluation Action 0, Next State: 
(4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 139\n", "Episode 59, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 141\n", "Episode 59, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 17.0, 135.0, 91.0), Reward: 1, , Cumulative Score: 142\n", "Episode 59, Iteration 60, State: [2, 17.0, 135.0, 91.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 16.0, 130.0, 90.0), Reward: 2, , Cumulative Score: 144\n", "Episode 59, Iteration 60, State: [3, 16.0, 130.0, 90.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 146\n", "Episode 59, Iteration 60, State: [6, 15.0, 125.0, 89.0], Agent Action: 1, Evaluation Action 0, Next State: (3.0, 15.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 148\n", "Episode 59, Iteration 60, State: [3, 15.0, 125.0, 89.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 16.0, 130.0, 90.0), Reward: 3, , Cumulative Score: 151\n", "Episode 59, Iteration 60, State: [7, 16.0, 130.0, 90.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 17.0, 135.0, 91.0), Reward: 3, , Cumulative Score: 154\n", "Episode 59, Iteration 60, State: [6, 17.0, 135.0, 91.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 18.0, 140.0, 92.0), Reward: 3, , Cumulative Score: 157\n", "Episode 59, Iteration 60, State: [5, 18.0, 140.0, 92.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 19.0, 145.0, 93.0), Reward: 1, , Cumulative Score: 158\n", "Episode: 59 Best Action: 0 Best evaluation action: 0\n", "Episode: 59 Score: 158 Best Reward: 3 Gsize State: 4.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 88.0\n", "Episode 60, Iteration 60, State: [7, 15.0, 121.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 3\n", "Episode 60, Iteration 60, State: [2, 15.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 5\n", "Episode 60, Iteration 60, State: [4, 16.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 8\n", "Episode 60, Iteration 60, State: [1, 15.0, 120.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 125.0, 89.0), Reward: 2, , Cumulative Score: 10\n", "Episode 60, Iteration 60, State: [6, 16.0, 125.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 120.0, 88.0), Reward: 3, , Cumulative Score: 13\n", "Episode 60, Iteration 60, State: [7, 15.0, 120.0, 88.0], Agent Action: 0, Evaluation Action 0, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 17\n", "Episode 60, Iteration 60, State: [7, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 20\n", "Episode 60, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 22\n", "Episode 60, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 25\n", "Episode 60, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 29\n", 
"Episode 60, Iteration 60, State: [5, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 32\n", "Episode 60, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 34\n", "Episode 60, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 37\n", "Episode 60, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 39\n", "Episode 60, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 42\n", "Episode 60, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 44\n", "Episode 60, Iteration 60, State: [7, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 47\n", "Episode 60, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 51\n", "Episode 60, Iteration 60, State: [3, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 1, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 54\n", "Episode 60, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 57\n", "Episode 60, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 59\n", "Episode 60, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 62\n", "Episode 60, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 65\n", "Episode 60, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 68\n", "Episode 60, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 70\n", "Episode 60, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 73\n", "Episode 60, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 75\n", "Episode 60, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 78\n", "Episode 60, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 80\n", "Episode 60, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 83\n", "Episode 60, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 
15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 86\n", "Episode 60, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 90\n", "Episode 60, Iteration 60, State: [7, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 93\n", "Episode 60, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 95\n", "Episode 60, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 98\n", "Episode 60, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 100\n", "Episode 60, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 103\n", "Episode 60, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 106\n", "Episode 60, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 108\n", "Episode 60, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 111\n", "Episode 60, Iteration 60, State: [5, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 114\n", "Episode 60, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 116\n", "Episode 60, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 119\n", "Episode 60, Iteration 60, State: [4, 15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 123\n", "Episode 60, Iteration 60, State: [2, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 126\n", "Episode 60, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 128\n", "Episode 60, Iteration 60, State: [2, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 131\n", "Episode 60, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 133\n", "Episode 60, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 136\n", "Episode 60, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 138\n", "Episode 60, Iteration 60, State: [3, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 141\n", "Episode 60, Iteration 60, State: [4, 
15.0, 125.0, 88.0], Agent Action: 0, Evaluation Action 2, Next State: (3.0, 14.0, 120.0, 87.0), Reward: 4, , Cumulative Score: 145\n", "Episode 60, Iteration 60, State: [1, 14.0, 120.0, 87.0], Agent Action: 2, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 148\n", "Episode 60, Iteration 60, State: [1, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 150\n", "Episode 60, Iteration 60, State: [6, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 153\n", "Episode 60, Iteration 60, State: [2, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 2, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 155\n", "Episode 60, Iteration 60, State: [4, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 158\n", "Episode 60, Iteration 60, State: [7, 15.0, 125.0, 88.0], Agent Action: 1, Evaluation Action 0, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 161\n", "Episode 60, Iteration 60, State: [3, 15.0, 125.0, 88.0], Agent Action: 2, Evaluation Action 0, Next State: (5.0, 16.0, 130.0, 89.0), Reward: 2, , Cumulative Score: 163\n", "Episode 60, Iteration 60, State: [1, 16.0, 130.0, 89.0], Agent Action: 0, Evaluation Action 2, Next State: (4.0, 15.0, 125.0, 88.0), Reward: 3, , Cumulative Score: 166\n", "Episode: 60 Best Action: 0 Best evaluation action: 0\n", "Episode: 60 Score: 166 Best Reward: 4 Gsize State: 3.0 Bratio State: 1:14.0 Btime State: 120.0 convert: 2 minutes 0 seconds Temperature State: 87.0\n" ] } ], "source": [ "# Training KNN\n", "\n", "# Split data into train & test sets\n", "X_train, X_test, y_train, y_test = train_test_split(states, actions, test_size=0.2, random_state=42)\n", "\n", "# Train the KNN model\n", "knn = KNeighborsClassifier(n_neighbors=3)\n", "knn.fit(X_train, y_train)\n", "\n", "# Testing Loop for KNN\n", "episodes = 60\n", "knn_scores = []\n", "knn_rewards = []\n", "y_true_knn = []\n", "y_pred_knn = []\n", "training_results_knn = []\n", "total_timesteps_knn = []\n", "count_rewards_knn = {}\n", "\n", "for episode in range(1, episodes + 1):\n", "    state = env.reset()\n", "    # Random noise is applied to the state inside the loop below (currently only the gsize element)\n", "    state = tuple(state)\n", "    done = False\n", "    score = 0\n", "    iterations = 0\n", "\n", "    best_episode = None\n", "    best_reward = -float('inf')\n", "    best_score = 0\n", "    best_action = None\n", "    evaluation_best_action = None\n", "\n", "    while not done:\n", "        iterations += 1\n", "        env.render()\n", "        state = list(state)\n", "\n", "        # gsize_state\n", "        state[0] = 1 + random.randint(0, 6)\n", "        #state[0] = 4 + random.randint(-1, 1)\n", "        # bratio_state\n", "        #state[1] = 10 + random.randint(0, 10)\n", "        #state[1] = 15 + random.randint(-1, 1)\n", "        # btime_state\n", "        #state[2] = 100 + random.randint(0, 40) * 5\n", "        #state[2] = 120 + random.randint(-5, 5)\n", "        # temperature_state\n", "        #state[3] = 89 + random.randint(-1, 1)\n", "\n", "        action = knn.predict([state])[0]\n", "        evaluation_action = env.get_evaluation_action(state)\n", "\n", "        next_state, reward, done, info = env.step(action)\n", "        next_state = tuple(next_state)\n", "        score += reward\n", "\n", "        #if action != evaluation_action:\n", "        print(f'Episode {episode}, Iteration {iterations}, State: {state}, Agent Action: {action}, Evaluation Action {evaluation_action}, Next State: 
{next_state}, Reward: {reward}, Cumulative Score: {score}')\n", "\n", "        state = next_state\n", "\n", "        gsize_state, bratio_state, btime_state, temperature_state = next_state\n", "        # Track the best reward in every episode\n", "        if reward > best_reward:\n", "            best_reward = reward\n", "            best_episode = (state, action, reward, next_state, done, score)\n", "            best_action = action\n", "            evaluation_best_action = evaluation_action\n", "\n", "    if best_episode:\n", "        best_state, best_action, best_reward, best_next_state, best_done, best_score = best_episode\n", "        gsize_state, bratio_state, btime_state, temperature_state = best_state\n", "\n", "    knn_scores.append(score)\n", "    knn_rewards.append(best_reward)\n", "    total_timesteps_knn.append(iterations)\n", "\n", "    minutes = int(btime_state // 60)\n", "    seconds = int(btime_state % 60)\n", "\n", "    # Store training results in the list\n", "    training_results_knn.append({\n", "        'Episode': episode,\n", "        'Score': score,\n", "        'Reward': best_reward,\n", "        'Gsize State': gsize_state,\n", "        'Bratio State': bratio_state,\n", "        'Btime State (sec)': btime_state,\n", "        'Btime State (min:sec)': f'{minutes} minutes {seconds} seconds',\n", "        'Temperature State': temperature_state\n", "    })\n", "\n", "    #if action != evaluation_best_action:\n", "    print(f'Episode: {episode} Best Action: {best_action} Best evaluation action: {evaluation_best_action}')\n", "    print(f'Episode: {episode} Score: {score} Best Reward: {best_reward} Gsize State: {gsize_state} Bratio State: 1:{bratio_state} Btime State: {btime_state} convert: {minutes} minutes {seconds} seconds Temperature State: {temperature_state}')\n", "\n", "    # Append the best action and its corresponding evaluation action\n", "    if best_action is not None and evaluation_best_action is not None:\n", "        if best_action != evaluation_best_action and best_reward == 4:\n", "            y_true_knn.append(best_action)\n", "            y_pred_knn.append(best_action)\n", "        else:\n", "            y_true_knn.append(evaluation_best_action)\n", "            y_pred_knn.append(best_action)\n", "\n", "    # Count the total for every reward\n", "    if best_reward in count_rewards_knn:\n", "        count_rewards_knn[best_reward] += 1\n", "    else:\n", "        count_rewards_knn[best_reward] = 1\n", "\n", "env.close()" ] }, { "cell_type": "code", "execution_count": 1046, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Training results successfully exported to 'training_results_knn.xlsx'.\n" ] } ], "source": [ "# Convert the list of dictionaries to a DataFrame\n", "df = pd.DataFrame(training_results_knn)\n", "\n", "# Save the DataFrame to an Excel file\n", "df.to_excel('training_results_knn.xlsx', index=False)\n", "\n", "print(\"Training results successfully exported to 'training_results_knn.xlsx'.\")" ] }, { "cell_type": "code", "execution_count": 992, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Reward 3: 13 times\n", "Reward 4: 47 times\n" ] } ], "source": [ "# Print the total counts of each reward\n", "for best_reward, count in sorted(count_rewards_knn.items()):\n", "    print(f'Reward {best_reward}: {count} times')" ] }, { "cell_type": "code", "execution_count": 993, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "js5OfjLaNSBv", "outputId": "2439419c-c9e4-4d2c-f6c6-e7da02c9b129" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Confusion Matrix:\n", "[[49  2  0]\n", " [ 0  0  0]\n", " [ 3  2  4]]\n", "Accuracy: 88.33%\n", "Precision: 95.10%\n", "Recall: 88.33%\n", "F1 Score: 90.10%\n" ] }, {
"name": "stderr", "output_type": "stream", "text": [ "/opt/homebrew/lib/python3.11/site-packages/sklearn/metrics/_classification.py:1497: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples. Use `zero_division` parameter to control this behavior.\n", " _warn_prf(average, modifier, f\"{metric.capitalize()} is\", len(result))\n" ] } ], "source": [ "def evaluate_knn(y_true_knn, y_pred_knn):\n", " # Calculate metrics\n", " conf_matrix = confusion_matrix(y_true_knn, y_pred_knn)\n", " accuracy = accuracy_score(y_true_knn, y_pred_knn)\n", " precision = precision_score(y_true_knn, y_pred_knn, average='weighted')\n", " recall = recall_score(y_true_knn, y_pred_knn, average='weighted')\n", " f1 = f1_score(y_true_knn, y_pred_knn, average='weighted')\n", "\n", " # Convert to percentage\n", " accuracy *= 100\n", " precision *= 100\n", " recall *= 100\n", " f1 *= 100\n", "\n", " return conf_matrix, accuracy, precision, recall, f1\n", "\n", "conf_matrix, knn_accuracy, knn_precision, knn_recall, knn_f1 = evaluate_knn(y_true_knn, y_pred_knn)\n", "#print(y_pred_knn)\n", "#print(y_true_knn)\n", "print(f'Confusion Matrix:\\n{conf_matrix}')\n", "print(f'Accuracy: {knn_accuracy:.2f}%')\n", "print(f'Precision: {knn_precision:.2f}%')\n", "print(f'Recall: {knn_recall:.2f}%')\n", "print(f'F1 Score: {knn_f1:.2f}%')" ] }, { "cell_type": "code", "execution_count": 1047, "metadata": {}, "outputs": [ { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjsAAAHHCAYAAABZbpmkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAC8EElEQVR4nO2deZgU5bX/v9Xd0z37CsyALK7IoiABQ4gYF4iABjeSaC4mGPlpTMSoJLmRJG65SVCvMbkxROMSjVHjFvW6XDG4gQuioIgLoigIAsM2zD691++P7vet6urau7qruvt8nmcepdfq6q6q857zPd8jiKIogiAIgiAIokTxub0BBEEQBEEQ+YSCHYIgCIIgShoKdgiCIAiCKGko2CEIgiAIoqShYIcgCIIgiJKGgh2CIAiCIEoaCnYIgiAIgihpKNghCIIgCKKkoWCHIAiCIIiShoIdgiAc5+CDD8b555/v9mYQBEEAoGCHIDzLPffcA0EQsHbtWrc3pegIh8P4wx/+gKlTp6KhoQGVlZUYPXo0Fi1ahI8//tjtzbPFiSeeiKOOOirr9hdeeAHV1dX40pe+hI6ODgCpYFMQBFx66aVZj3/55ZchCAIeffRRfhv7rVVWVmLHjh2m35sgigUKdgiCcJxNmzbhjjvucOW99+3bh+nTp2Px4sUYMmQIfv3rX2PZsmU488wz8eSTT5bURfvFF1/E3LlzceSRR+L5559Hc3Nzxv133HEHdu7cafr1IpEIrr/+eqc3kyBch4IdgiB0icfjiEajlp4TCoVQUVGRpy3S5/zzz8c777yDRx99FE899RQuu+wyLFy4EDfeeCM++eQT/PjHP3bkfezsFydZuXIl5s6di9GjR6sGOuPHj0cikbAUvBxzzDGWAySCKAYo2CGIImfHjh244IIL0NrailAohPHjx+Nvf/tbxmOi0SiuvvpqTJ48GQ0NDaipqcHxxx+Pl156KeNxW7duhSAIuOmmm/DHP/4Rhx12GEKhED788ENce+21EAQBmzdvxvnnn4/GxkY0NDTg+9//Pvr7+zNeR6nZYWWS1157DYsXL8bgwYNRU1ODs846C3v37s14bjKZxLXXXothw4ahuroaJ510Ej788ENTOqA1a9bgmWeewcKFCzFv3rys+0OhEG666Sb+7xNPPBEnnnhi1uPOP/98HHzwwYb75Z133kEgEMB1112X9RqbNm2CIAj485//zG/r7OzE5ZdfjhEjRiAUCuHwww/HDTfcgGQyqfu5lLzyyis47bTTcPjhh+P5559HS0tL1mMOPvhgfO9737MUvPziF7+wHCARRDEQcHsDCIKwz+7du/GVr3wFgiBg0aJFGDx4MJ599lksXLgQ3d3duPzyywEA3d3duPPOO/Gd73wHF154IXp6enDXXXdh1qxZePPNN3HMMcdkvO7dd9+NcDiMiy66CKFQKCNr8O1vfxuHHHIIli5dirfffht33nknhgwZghtuuMFwey+99FI0NTXhmmuuwdatW/HHP/4RixYtwkMPPcQfs2TJEtx4442YO3cuZs2ahXfffRezZs1COBw2fP0nn3wSAPDd737XxN6zjnK/DB06FCeccAIefvhhXHPNNRmPfeihh+D3+/Gtb30LANDf348TTjgBO3bswA9+8AOMHDkSr7/+OpYsWYJdu3bhj3/8o6lteO2113DqqafikEMOwQsvvIBBgwZpPvaXv/wl7r33Xlx//fX405/+ZPjahxxyCA+QrrzySgwbNszUNhGE5xEJgvAkd999twhAfOuttzQfs3DhQnHo0KHivn37Mm4/99xzxYaGBrG/v18URVGMx+NiJBLJeMyBAwfE1tZW8YILLuC3bdmyRQQg1tfXi3v27Ml4/DXXXCMCyHi8KIriWWedJba0tGTcNmrUKHHBggVZn2XmzJliMpnkt19xxRWi3+8XOzs7RVEUxfb2djEQCIhnnnlmxutde+21IoCM11TjrLPOEgGIBw4c0H0c44QTThBPOOGErNsXLFggjho1iv9bb7/89a9/FQGI7733Xsbt48aNE08++WT+7//6r/8Sa2pqxI8//jjjcVdeeaXo9/vFbdu2GW5
rc3OzWFdXJ44fPz5rO+SMGjVKPO2000RRFMXvf//7YmVlpbhz505RFEXxpZdeEgGIjzzyCH+8/Lf26aefioFAQPzxj3+c8d7jx4/X3T6C8DJUxiKIIkUURfzrX//C3LlzIYoi9u3bx/9mzZqFrq4uvP322wAAv9+PYDAIIFUm6ujoQDwex5QpU/hj5MybNw+DBw9Wfd+LL74449/HH3889u/fj+7ubsNtvuiiiyAIQsZzE4kEPv/8cwCpzqJ4PI4f/ehHGc9T6ypSg21DXV2dqcdbRW2/nH322QgEAhnZqffffx8ffvghzjnnHH7bI488guOPPx5NTU0Z39XMmTORSCSwatUqw/fv6+tDT08PWltbUV9fb2qbf/WrXyEej5suTR166KH47ne/i9tvvx27du0y9RyC8DoU7BBEkbJ37150dnbi9ttvx+DBgzP+vv/97wMA9uzZwx//97//HRMmTEBlZSVaWlowePBgPPPMM+jq6sp67UMOOUTzfUeOHJnx76amJgDAgQMHDLfZ6Lks6Dn88MMzHtfc3MwfqwcLAHp6egwfawe1/TJo0CDMmDEDDz/8ML/toYceQiAQwNlnn81v++STT7B8+fKs72rmzJkAMr8rLZjG58UXX8R3vvMdJBIJw+fYCV6sBkgE4XVIs0MQRQoTtZ533nlYsGCB6mMmTJgAALjvvvtw/vnn48wzz8TPfvYzDBkyBH6/H0uXLsWnn36a9byqqirN9/X7/aq3i6JouM25PNcMY8aMAQC89957OP744w0fLwiC6ntrBRFa++Xcc8/F97//faxfvx7HHHMMHn74YcyYMSNDT5NMJvH1r38d//mf/6n6GqNHjzbcXgD4z//8T+zfvx833ngjLrzwQtx1110Z2TI1fvnLX+If//gHbrjhBpx55pmG73HooYfivPPOw+23344rr7zS1HYRhJehYIcgipTBgwejrq4OiUSCZwe0ePTRR3HooYfisccey7gwKkW1bjNq1CgAwObNmzOyKPv37zeVOZo7dy6WLl2K++67z1Sw09TUhM8++yzrdpZhMsuZZ56JH/zgB7yU9fHHH2PJkiUZjznssMPQ29tr+F2Z4YYbbkBHRwfuvPNONDU14fe//73u4w877DCcd955+Otf/4qpU6eaeo9f/epXuO+++0wJzwnC61AZiyCKFL/fj3nz5uFf//oX3n///az75S3dLKMiz2KsWbMGq1evzv+GWmDGjBkIBAK49dZbM26Xt2/rMW3aNMyePRt33nknnnjiiaz7o9EofvrTn/J/H3bYYfjoo48y9tW7776L1157zdJ2NzY2YtasWXj44Yfx4IMPIhgMZmVQvv3tb2P16tV47rnnsp7f2dmJeDxu6T3/+te/4pvf/CZuvvlm/OY3vzF8/K9+9SvEYjHceOONpl5fHiC1t7db2jaC8BqU2SEIj/O3v/0Ny5cvz7r9sssuw/XXX4+XXnoJU6dOxYUXXohx48aho6MDb7/9Np5//nk+PuAb3/gGHnvsMZx11lk47bTTsGXLFtx2220YN24cent7C/2RNGltbcVll12G3//+9zj99NMxe/ZsvPvuu3j22WcxaNAgw3INANx777045ZRTcPbZZ2Pu3LmYMWMGampq8Mknn+DBBx/Erl27uNfOBRdcgJtvvhmzZs3CwoULsWfPHtx2220YP368KcG1nHPOOQfnnXce/vKXv2DWrFlobGzMuP9nP/sZnnzySXzjG9/A+eefj8mTJ6Ovrw/vvfceHn30UWzdulW3jVyJz+fD/fffj66uLlx11VVobm7OEnbLYcHL3//+d9PvwcpfmzZtwvjx400/jyC8BgU7BOFxlFkOxvnnn4/hw4fjzTffxK9//Ws89thj+Mtf/oKWlhaMHz8+o/xw/vnno729HX/961/x3HPPYdy4cbjvvvvwyCOP4OWXXy7QJzHHDTfcgOrqatxxxx14/vnnMW3aNPz73//G9OnTUVlZafj8wYMH4/XXX8df/vIXPPTQQ/jlL3+JaDSKUaNG4fTTT8dll13GHzt27Fjce++9uPrqq7F48WKMGzcO//jHP/DAAw9Y3i+nn346qqqq0NPTk9GFxaiursbKlSvxu9/9Do888gjuvfde1NfXY/To0bjuuuvQ0NBg6f0AIBgM4vHHH8fMmTNx6aWXorGxEf/xH/+h+XhWmjIjbAZSgmirARJBeBFBdEoZSBAEkSc6OzvR1NSE3/zmN/jlL3/p9uYQBFFkkGaHIAhPMTAwkHUbcxdWG+1AEARhBJWxCILwFA899BDuuecenHrqqaitrcWrr76Kf/7znzjllFNw3HHHub15BEEUIRTsEAThKSZMmIBAIIAbb7wR3d3dXLRspuOIIAhCDdLsEARBEARR0pBmhyAIgiCIkoaCHYIgCIIgShrS7CA1s2bnzp2oq6szZVpGEARBEIT7iKKInp4eDBs2DD6fdv6Ggh0AO3fuxIgRI9zeDIIgCIIgbLB9+3YMHz5c834KdgDU1dUBSO2s+vp6l7eGIAiCIAgzdHd3Y8SIEfw6rgUFOwAvXdXX11OwQxAEQRBFhpEEhQTKBEEQBEGUNBTsEARBEARR0lCwQxAEQRBESUPBDkEQBEEQJQ0FOwRBEARBlDQU7BAEQRAEUdJQsEMQBEEQRElDwQ5BEARBECUNBTsEQRAEQZQ0FOwQBEEQBFHSuBrsrFq1CnPnzsWwYcMgCAKeeOKJjPt7e3uxaNEiDB8+HFVVVRg3bhxuu+22jMeEw2FccsklaGlpQW1tLebNm4fdu3cX8FMQBEEQBOFlXA12+vr6MHHiRCxbtkz1/sWLF2P58uW47777sHHjRlx++eVYtGgRnnzySf6YK664Ak899RQeeeQRrFy5Ejt37sTZZ59dqI9AEARBEITHEURRFN3eCCA1xOvxxx/HmWeeyW876qijcM455+Cqq67it02ePBlz5szBb37zG3R1dWHw4MF44IEH8M1vfhMA8NFHH2Hs2LFYvXo1vvKVr5h67+7ubjQ0NKCrq6tsBoEORBOoCvrd3gyCIAhNwrEEKivsnafCsQSCfh98Pv0BkWokkyKiiaTt9yYKh9nrt6c1O1/96lfx5JNPYseOHRBFES+99BI+/vhjnHLKKQCAdevWIRaLYebMmfw5Y8aMwciRI7F69WrN141EIuju7s74KyeWv9+O8dcsx8NvbXd7UwiCIFR56K1tOPra5/C3V7dYfm5POIbpN7yI8+95y9Z7X/LA25j6uxdwoC9q6/mE9/B0sHPLLbdg3LhxGD58OILBIGbPno1ly5bha1/7GgCgvb0dwWAQjY2NGc9rbW1Fe3u75usuXboUDQ0N/G/EiBH5/Bie453tB5AUgbe3HXB7UwiCILJ4c0sHfvn4+4glRLy5pcPy8z/b24d9vVGst3mOe2dbJ7oGYvh0b6+t5xPew/PBzhtvvIEnn3wS69atw+9//3tccskleP7553N63SVLlqCrq4v/bd9eXhmO/kgCANATibu8JQRBEJns7BzAj+5fh3gypbDoHLCeXekciAEAYgl7Ko1YIgkA6I
smbD2f8B4BtzdAi4GBAfziF7/A448/jtNOOw0AMGHCBKxfvx433XQTZs6ciba2NkSjUXR2dmZkd3bv3o22tjbN1w6FQgiFQvn+CJ6lLx3k9FGwQxCEhwjHEvjBP9ZhX28UtaEAeiNxdPbHLL9OZ38qQGJBi1WiLNihc2TJ4NnMTiwWQywWg8+XuYl+vx/JZOqHOHnyZFRUVOCFF17g92/atAnbtm3DtGnTCrq9xURflIIdAuiPxvHSR3sQidPqlXAfURSx5LH38N6OLjRVV+C/vzkBANA1YD3YYc+JJ0XY6cGJUbBTcria2ent7cXmzZv5v7ds2YL169ejubkZI0eOxAknnICf/exnqKqqwqhRo7By5Urce++9uPnmmwEADQ0NWLhwIRYvXozm5mbU19fj0ksvxbRp00x3YpUj/enUbE+YDuRy5raVn+FPL3yCX58xHt+bdrDbm0OUOXe9ugWPv7MDfp+AZfO/hBFN1QBgM7MjPSeWEBEMWOvIiqfLX/1UxioZXA121q5di5NOOon/e/HixQCABQsW4J577sGDDz6IJUuWYP78+ejo6MCoUaPw29/+FhdffDF/zh/+8Af4fD7MmzcPkUgEs2bNwl/+8peCf5ZigpexohTslDO7OgcAADs7wy5vCVHuvPrJPvzu/zYCAH512lh89bBB6A6nApaBWMJyC3pmsJNEMGC+iJFMilwvROfI0sHVYOfEE0/UTTG2tbXh7rvv1n2NyspKLFu2TNOYkMiGrVb6IrRqKWcGYqnvf4BO6ISLbNvfj0X/fBtJEfjm5OE4/6sHAwDqQgH4fQISSRHdAzFrwY5M1GxVtxNLSo/vp3NkyeBZzQ6RP9hqpZfKWGVNOJY6qbOghyAKTV8kjgvvXYvO/hgmjmjEb848CoKQKjkJgoCGqgoAUneVWbpkmZ2o1WBH1sFFmZ3SgYKdMoRldKKJJKJxe90KRPETZpmdGP0GCHf4z39twKbdPRhcF8Jfz5uclb1pZMGORd2OPDiy2n4ek50TSaBcOlCwU4bID2A6mMsXqYxFmR2i8HT0RfHMhl0AgNvO+xLaGiqzHtNQzYIda1478sfHLC7o5GUv8tkpHSjYKTPiiSQisoO/l4KdsoVldsJUxiJcgJXRq4N+TB7VrPqYRrtlLNnj40mrmh0pE9RP58eSgYKdMqNfcWGjYKd84ZkdCnYIFwin/Z30hMeN1UEAmRocI0RRzCh7ReM5lLEos1MyULBTZii7C6iMVb5EmECZTuiEC7CMYqVOW7gkUDZfxuqLJnjrOGCjG0v2+H4SKJcMFOyUGcruApqPVb4MUBmLcBHWDaif2bEuUFbqe6wGO/LuLWo9Lx0o2CkzlJkcyuyULyyjQ2Uswg1YkB3SC3ZsaHaUgVEuredU5i8dKNgpM5RGghTslCeiKHLNBAU7hBvwYEenjGVHs6OcpRW32Hoezyhj0bFRKlCwU2Yoa9A0H6s8icSTYOblpNkh3CAcZ2UsHc1OtXXNjjKzk0sZqy8atzVIlPAeFOyUGcruAhoZUZ7IdTqReBLJJJ3QicLCBcpmylhWNDsDuWl25GUsUZS0RURxQ8FOmaH0jSA79PJEeQKnUhZRaCK8G8vZ1vNszY791nOAzpGlAgU7ZYYys0NlrPJEGdxQsEMUGqkbS0ezk87s9ETipjM0Ss1OLg7KAHVklQoU7JQZ1I1FANnt5qTbIQqNmTJWfTrYAYBukx1ZTraeA5TZKRUo2Ckz2IFbVxlI/ZuCnbJEmckhrx2i0JhxUPb7BNSnz1Vm28+zBMoW9WjK7i06R5YGFOyUGSwlO6QuBIBMBcuVcJTKWIS7sDJWSKeMBUi6HbMiZRYU+X0CgNzLWDQyojSgYKfMYJmd1vrUhGFatZQnbFXNoDIWUWjCJgTKgOSi3GWy/ZyJmQfVpoKkXMZFADQMtFSgYKfMYJkdCnbKm4EodWMR7mJmXAQgm49lOrOTCooGp7PX1jU7ijIWLQRKAgp2ygyW2WFlLLJDL09Is0O4jaTZ0b8MWQ12WDfW4NrUOc5y67kys0MC5ZKAgp0yg9mfD6Zgp6zJ6saiYIcoMBET3ViAbBioCYFyOJbgGaNB6WAnbjGzo3w8Ga+WBhTslBmsbMXKWOFY0tLJIJEUcfX/vo8n392Zl+0jCkN26zm5xBKFxYzPDgA0VjFjQWPNTpdMnNxUY0+zk1XGogVhSUDBTpmhLGMB1lYu67cfwL2rP8fN/97k+LYRhYMyO4TbWBUom8nssFJXY1UFgv7U5S2WYxmLfHZKAwp2ygwmUG6sDvKTQa+Fg/lAX+pk0kup3aKGNDuE25jx2QGsaXaYoWBDdQUq0uc3pUmgEcpWdXJQLg0o2Ckz2CqlJuRHrQ1jQZYmjtDFsahRlq1IhEkUGss+O2YyOwNSZqcikJvPDjdepWOjJKBgp4xIJEV+gqkJBlATSq2orMzHYsEOlT2Km2yfHdLsEIXFzLgIQOazY0azw8pYssy1Xc0Oe99+aj0vCSjYKSPkq/fqkB81QeuZne5w6mQST4qWuxwI78AclOtCqd8ABa8p/rDiYxx/44vY0xN2e1NKHi5QNtLsVFnQ7KQ9dhqrKhBgDsqWx0Uk06+RyiiRQLk0oGCnjGBC5IBPQNDvszUfSz5ROGwxPUx4B5bZaaxJXUhIs5Ni+fvt2N4xgHVbD7i9KSWP1Hpu4LPDHZRjSBoELkzX01BdgYpAOrNjs4zFMjtUxioNKNgpI9hBWx30QxAE1KRX9VbmY3UPSI+lC2TxwsZDNKf1EDQuIgU7Rvb3mRtNQNjHqkBZFI1L7pJmJ8gFytbHRYgZ70sC5dKAgp0ygh20LMhh/7Wd2aFgp2hhZSsm/qQyVgqmzzhAwU5eSSRFHlQYBTuhgB/VwdRjOg3mY0maHfut51HK7JQkFOyUEfLMDiDpNexodgCp5k4UH+y7a66hYEcOOxYos5Nf5AslozIWINPtGLSfc81OLq3nCs0OZXZKAwp2yggmUK5VZHaslbEos1MKsO+uKZ3Zoe8yJUyNpPUdHRTs5JWMYMdAoAwADSbbz1kwVF9VgQp/WqBsN9iRZXZE0Vp2iPAeFOyUEUygXB20X8aiYKc0YJmc5rRAmTQ7QL/s93zARJszYR/W3BD0++BLd03pwTI7XSaDncYqKbMTt+ygnKnZSYrgQTBRvFCwU0awoIb560hlLPMXukzNDp0AihWe2aEyFkce8O3vpWAnn7Dfn5GhIMOs1w47PzVW5yJQTj2+Ph3sADQwuRSgYKeM6IuqZ3bMmgrGE0n+GgBldooZdmGnMpaEPMNJZaz8YtZQkMHnY+lodmKJJA9KGmVlLLuanVDAx/WNpNspfijYKSP6FZkd9l+zZaxuRVCkdOEligdWRmii1nOO3Cm3oz9KOo08YnbiOaOhylizI88611fJfHYsz8ZKfe9Bv48vDEuhIyscS+De1Vvx+f4+tzfFFSjYKSOUmR2rs1+6FScaukAWJ4mkiGg8uxur3C/u8qA/Gs/MYhLOEjE58ZxhJrPDxcmVAfjTxqmAFLyYhQVHFQEfXxCWwuy45
zfuxtX/+wFuXL7J7U1xBVeDnVWrVmHu3LkYNmwYBEHAE088kXG/IAiqf//93//NH9PR0YH58+ejvr4ejY2NWLhwIXp7ewv8SYqDfj4ENF3GSgc9vSbLWEpxIDkoFyfyklVTNYkwGcoZSB2k28kbZg0FGZJAWfs76eJt56kAPpAuY8WTFjM76ccHfIKU2SmBMhbToe3tibi8Je7garDT19eHiRMnYtmyZar379q1K+Pvb3/7GwRBwLx58/hj5s+fjw8++AArVqzA008/jVWrVuGiiy4q1EcoKtgBWxNkZax0sGOyjKUMdmjyeXEiD3bYhUF5ezmSFexQR1besFrGspLZYY/lPjtWx0WkM0EVfh9qSyizw5oQylVsHXDzzefMmYM5c+Zo3t/W1pbx7//93//FSSedhEMPPRQAsHHjRixfvhxvvfUWpkyZAgC45ZZbcOqpp+Kmm27CsGHD8rfxRQhL01eHMstYZn/8ckNBgC6OxQo76YUCPgQDPlT4BcQSIgZiCTS6u2muoizndvSV5wq4EFgVKJvR7PC5WOkskF0HZVbGCgYkzU5vCWR2mOygFPRHdigazc7u3bvxzDPPYOHChfy21atXo7GxkQc6ADBz5kz4fD6sWbNG87UikQi6u7sz/soB9iNXZnb6ownDAXuAShmLWs+LEmlV7c/4b7lrsPoVQT+1n+cP9hsMOanZkbWdA7Ddes66tyr8paXZYQGmWdlCqVE0wc7f//531NXV4eyzz+a3tbe3Y8iQIRmPCwQCaG5uRnt7u+ZrLV26FA0NDfxvxIgRedtuL9GvECgzJ2XAXLQvHwIKUGanWGHfW1U6yGH/LXevHaUgmYwF84eU2bHoszOg3SXHPHiYvqeCa3ZEU4s5Bhco+0tLs1PuZayiCXb+9re/Yf78+aisrMz5tZYsWYKuri7+t337dge20PsoTQVDAR8CafdSMweAMrNT7hfHYmVAcaGpSmf6yj14Va7eaT5W/rAuUE5la2IJMUtbxZAyO+lgJyBd3mIWRMqs7FXh9/EseClkdljmNhJPWs52lQKuanbM8sorr2DTpk146KGHMm5va2vDnj17Mm6Lx+Po6OjI0vvICYVCCIVCedlWL6PM7AiCgJpQAF0DMVNeO0yzU1Xhx0AsQWWsIkWpl+CZnWh5f5/s+PAJqe406sbKH1YFypUVKX1ZNJ5E50CMl+DlKDU7FT7pteMJESpPySKZFJFISsFOtQ2Xea8iX5z2ReIZzQnlQFFkdu666y5MnjwZEydOzLh92rRp6OzsxLp16/htL774IpLJJKZOnVrozXSchIXUqxmUg0Dl/29GgMcyO0PqU4FisZkKOr0/ixW2wmMZnUoqYwGQXHKHNlQBoDJWPrHqsyMIgmzyufr3kq3ZkWZumc1kyDNAFX6Bnx9LIbMjz9yWYynL1WCnt7cX69evx/r16wEAW7Zswfr167Ft2zb+mO7ubjzyyCP4f//v/2U9f+zYsZg9ezYuvPBCvPnmm3jttdewaNEinHvuuUXfibXu8w5MuPY5/OONzx17TfYDZxbogCzYMSFaY6aCrXWpUmIxtZ5/1N6Nidf9G8te2uz2prjOgOJCw34P5R7sMN3a8KZUsENlrPxhtRsLkM/HUhcpKzU7fp8AIR3vmB0ZIe/cqvBL4yJKITiQl/9KIVNlFVeDnbVr12LSpEmYNGkSAGDx4sWYNGkSrr76av6YBx98EKIo4jvf+Y7qa9x///0YM2YMZsyYgVNPPRXTp0/H7bffXpDtzydvfNaBvmgCqz/d58jrJZIiTx3LU8BMv2PmYO5WZnaKqIz11pYO9EbieG2zM/uzmImkvzeW2WFlrHC5d2OlP/+I5moANB8rn1gtYwGSbker/Vyp2REEQdaRZS6rG4vLMzs+bryqpRMqJgYyMjv60+NLEVc1OyeeeKKhRf1FF12kaxLY3NyMBx54wOlNcx2mj3EqoJCnYeWZnRpekzaj2Uk9prU+ldkppkwAW6UX0zbniwFFN1YlZXYASMfAiCYKdvKNVYEyADQYtJ8rTQWBlNdONJ7MCGL0YOUuv0+A3yeg2uL8QC8jt5YoBd8gqxSFZqccYVkUpzpk2MrE7xMQknUpWDEW5JqdOpbZKZ4Dhl24iikblS/Y9xZi3Vik2QEgfX5WxuoJxy277xLmkH6DFspYTLOjMjIikRT5ApEZEAKSbsesZoeVu1iXailldsIKgXK5QcGOR2GeNk5dgPpkeh1BkIR7fD6WwY9fFEVJs5PO7BRnsFP4be7sj+Ld7Z0Ff18tlJkd9t9SOKHnAjtGhjZWIn2tI5FynuBlrICFMpaOZqcnHAMrErBuLAAIWCxjxRPSxHNAyoKXgutwRhmrDI0FKdjxKF0DTpexUj/0WkX/pdkyVn80gXi6m0nK7BTPqpcFO264BC9++F2csew1vL+jq+DvrcaAsvWcfHYASMdIXagCTemOHipl5Qd7AuW0Zkcl2GG31QT9CMoCqKBFF2X5xHNA5jJfAmWfzDIWBTuER2ApWac6ntQ6sQDzZSwWfAV8AppqUiedSBG1nvPMjgvbvHlPLwBgR+dAwd9bDS5QpnERGUiz4/xorqFgJ5+E45kjS8zQoFPGUradM+yWsdjz+GKwBDI78sUplbEIz9DluGaHuSerZ3aMgh2pHl4hM6Ernoujm5kdVgrxiv5D6bNDmp0ULLNTEwzwgJ7az/MD99mx0o2lI1Bm50t5CQuQTT632HrOnscclPsiccNmGi8TTyQz9gFldgjPwAXKDl0gma+CMrNjtozVJXMnZasxp7Yt34iiyAOOSDxpaU5OrsQSSfSk6+NeCXZYdosJ1avSF5xyDnai8SQv01YF/WhJBzsHKNjJC7bKWGnhsXJsDSAZDco7sQBYbz1nE8+ZZid9fkyKqXNHsaI8V1OwQ3gCURR5m7dTmQie2QlmZnbqTGd2UvfXVVXw1VgiKRbFjJXucDzjZFfIk5Zc4OqVk2VWZidIPjtKa4ZmyuzkFUmgbN1UUC+zkx3spIeBWtTsBNLPq5IFY8Vc+lFeR4r5s9iFgh0P0hdN8NEG4XjCkfQpz+xolrH0L3TyNLF8NVYMolbl6ryQGQz5iTnqEY2T0kGZxkVIE8+DAR8q/D6ZZifi5maVLJLPjvlLkK5mpz+77RyQZ3bslbH8PqEkuhWV52nK7BCeQJ6mFUXz9WY9pMyOsoxlzjSLldXqKwMZPj3F0JGlXJ0XMkCTC1yd+B6dQMtBuZyDnf5I5vHRzMtY5ec0WwhyGRcRjiWzjmE1Q0FArtmx5qDMngfIzpFFLFJWHtsU7BCeoFtRk3YioGCZG6VAuS6UOjkY+S7IMzuCIPAVWTFkdpQdNYW8qMuzSl7R7GT57ASLT3DuNCyzU50u80plLMrsOI0oSqNrQhYyO7WhAPxpAySlbodlexqVAuX0wsyqg3IwI9gp/snnyqxUMX8Wu1Cw40GUB7IT7ec5Z3bS3Vj16ZMJFykXQbCjLGMVNLPjRc2OwkG5mnx2eGanWpHZodZz55EfB1YyO4IgSKUshW6nSyOzE7Tbeh6QjFdZ
[... base64 PNG data truncated: embedded image output of the Learning Curve KNN cell below, a plot of Total Score against Episode ...]
HEARBlAdUxio/ivqbbm9vRzAYRGNjY8btra2taG9v13ze0qVL0dDQwP9GjBiR5y0lCIIgvAIPdgJFfQkkLFCW3/SSJUvQ1dXF/7Zv3+72JhEEQRAFgjso+6iMVS4UtWVrW1sbotEoOjs7M7I7u3fvRltbm+bzQqEQQqFQAbaQIAiC8BpUxio/ivqbnjx5MioqKvDCCy/w2zZt2oRt27Zh2rRpLm4ZQRAE4VWojFV+uJrZ6e3txebNm/m/t2zZgvXr16O5uRkjR45ER0cHtm3bhp07dwJIBTJAKqPT1taGhoYGLFy4EIsXL0ZzczPq6+tx6aWXYtq0adSJRRAEQajCgx0qY5UNroa1a9euxaRJkzBp0iQAwOLFizFp0iRcffXVAIAnn3wSkyZNwmmnnQYAOPfcczFp0iTcdttt/DX+8Ic/4Bvf+AbmzZuHr33ta2hra8Njjz1W+A9DEARBFAUnHjkYI5urcfLYVrc3hSgQgiiKotsb4Tbd3d1oaGhAV1cX6uvr3d4cgiAIgiBMYPb6TQVLgiAIgiBKGgp2CIIgCIIoaSjYIQiCIAiipKFghyAIgiCIkoaCHYIgCIIgShoKdgiCIAiCKGko2CEIgiAIoqShYIcgCIIgiJKGgh2CIAiCIEoaCnYIgiAIgihpKNghCIIgCKKkoWCHIAiCIIiShoIdgiAIgiBKGgp2CIIgCIIoaQJub4AXEEURQGpUPEEQBEEQxQG7brPruBYU7ADo6ekBAIwYMcLlLSEIgiAIwio9PT1oaGjQvF8QjcKhMiCZTGLnzp2oq6uDIAiOvW53dzdGjBiB7du3o76+3rHXLWVon1mD9pd1aJ9Zg/aXNWh/WSeXfSaKInp6ejBs2DD4fNrKHMrsAPD5fBg+fHjeXr++vp5+9BahfWYN2l/WoX1mDdpf1qD9ZR27+0wvo8MggTJBEARBECUNBTsEQRAEQZQ0FOzkkVAohGuuuQahUMjtTSkaaJ9Zg/aXdWifWYP2lzVof1mnEPuMBMoEQRAEQZQ0lNkhCIIgCKKkoWCHIAiCIIiShoIdgiAIgiBKGgp2CIIgCIIoaSjYySPLli3DwQcfjMrKSkydOhVvvvmm25vkCVatWoW5c+di2LBhEAQBTzzxRMb9oiji6quvxtChQ1FVVYWZM2fik08+cWdjPcDSpUtx7LHHoq6uDkOGDMGZZ56JTZs2ZTwmHA7jkksuQUtLC2prazFv3jzs3r3bpS12n1tvvRUTJkzgJmXTpk3Ds88+y++n/aXP9ddfD0EQcPnll/PbaJ9lcu2110IQhIy/MWPG8Ptpf2WzY8cOnHfeeWhpaUFVVRWOPvporF27lt+fz3M/BTt54qGHHsLixYtxzTXX4O2338bEiRMxa9Ys7Nmzx+1Nc52+vj5MnDgRy5YtU73/xhtvxJ/+9CfcdtttWLNmDWpqajBr1iyEw+ECb6k3WLlyJS655BK88cYbWLFiBWKxGE455RT09fXxx1xxxRV46qmn8Mgjj2DlypXYuXMnzj77bBe32l2GDx+O66+/HuvWrcPatWtx8skn44wzzsAHH3wAgPaXHm+99Rb++te/YsKECRm30z7LZvz48di1axf/e/XVV/l9tL8yOXDgAI477jhUVFTg2WefxYcffojf//73aGpq4o/J67lfJPLCl7/8ZfGSSy7h/04kEuKwYcPEpUuXurhV3gOA+Pjjj/N/J5NJsa2tTfzv//5vfltnZ6cYCoXEf/7zny5soffYs2ePCEBcuXKlKIqp/VNRUSE+8sgj/DEbN24UAYirV692azM9R1NTk3jnnXfS/tKhp6dHPOKII8QVK1aIJ5xwgnjZZZeJoki/MTWuueYaceLEiar30f7K5uc//7k4ffp0zfvzfe6nzE4eiEajWLduHWbOnMlv8/l8mDlzJlavXu3ilnmfLVu2oL29PWPfNTQ0YOrUqbTv0nR1dQEAmpubAQDr1q1DLBbL2GdjxozByJEjaZ8BSCQSePDBB9HX14dp06bR/tLhkksuwWmnnZaxbwD6jWnxySefYNiwYTj00EMxf/58bNu2DQDtLzWefPJJTJkyBd/61rcwZMgQTJo0CXfccQe/P9/nfgp28sC+ffuQSCTQ2tqacXtrayva29td2qrigO0f2nfqJJNJXH755TjuuONw1FFHAUjts2AwiMbGxozHlvs+e++991BbW4tQKISLL74Yjz/+OMaNG0f7S4MHH3wQb7/9NpYuXZp1H+2zbKZOnYp77rkHy5cvx6233ootW7bg+OOPR09PD+0vFT777DPceuutOOKII/Dcc8/hhz/8IX784x/j73//O4D8n/tp6jlBFBGXXHIJ3n///QxtAKHOkUceifXr16OrqwuPPvooFixYgJUrV7q9WZ5k+/btuOyyy7BixQpUVla6vTlFwZw5c/j/T5gwAVOnTsWoUaPw8MMPo6qqysUt8ybJZBJTpkzB7373OwDApEmT8P777+O2227DggUL8v7+lNnJA4MGDYLf789S3u/evRttbW0ubVVxwPYP7btsFi1ahKeffhovvfQShg8fzm9va2tDNBpFZ2dnxuPLfZ8Fg0EcfvjhmDx5MpYuXYqJEyfif/7nf2h/qbBu3Trs2bMHX/rSlxAIBBAIBLBy5Ur86U9/QiAQQGtrK+0zAxobGzF69Ghs3ryZfmMqDB06FOPGjcu4bezYsbz0l+9zPwU7eSAYDGLy5Ml44YUX+G3JZBIvvPACpk2b5uKWeZ9DDjkEbW1tGfuuu7sba9asKdt9J4oiFi1ahMcffxwvvvgiDjnkkIz7J0+ejIqKiox9tmnTJmzbtq1s95kayWQSkUiE9pcKM2bMwHvvvYf169fzvylTpmD+/Pn8/2mf6dPb24tPP/0UQ4cOpd+YCscdd1yWZcbHH3+MUaNGASjAuT9niTOhyoMPPiiGQiHxnnvuET/88EPxoosuEhsbG8X29na3N811enp6xHfeeUd85513RADizTffLL7zzjvi559/LoqiKF5//fViY2Oj+L//+7/ihg0bxDPOOEM85JBDxIGBAZe33B1++MMfig0NDeLLL78s7tq1i//19/fzx1x88cXiyJEjxRdffFFcu3atOG3aNHHatGkubrW7XHnlleLKlSvFLVu2iBs2bBCvvPJKURAE8d///rcoirS/zCDvxhJF2mdKfvKTn4gvv/yyuGXLFvG1114TZ86cKQ4aNEjcs2ePKIq0v5S8+eabYiAQEH/729+Kn3zyiXj//feL1dXV4n333ccfk89zPwU7eeSWW24RR44cKQaDQfHLX/6y+MYbb7i9SZ7gpZdeEgFk/S1YsEAUxVQL4lVXXSW2traKoVBInDFjhrhp0yZ3N9pF1PYVAPHuu+/mjxkYGBB/9KMfiU1NTWJ1dbV41llnibt27XJvo13mggsuEEeNGiUGg0Fx8ODB4owZM3igI4q0v8ygDHZon2VyzjnniEOHDhWDwaB40EEHieecc464efNmfj/tr2yeeuop8aijjhJDoZA4ZswY8fbbb8+4P5/nfkEURTH3/BBBEARBEIQ3Ic0OQR
AEQRAlDQU7BEEQBEGUNBTsEARBEARR0lCwQxAEQRBESUPBDkEQBEEQJQ0FOwRBEARBlDQU7BAEQRAEUdJQsEMQRNGydetWCIKA9evX5+09zj//fJx55pl5e32CIPIPBTsEQbjG+eefD0EQsv5mz55t6vkjRozArl27cNRRR+V5SwmCKGYCbm8AQRDlzezZs3H33Xdn3BYKhUw91+/3l+0UaYIgzEOZHYIgXCUUCqGtrS3jr6mpCQAgCAJuvfVWzJkzB1VVVTj00EPx6KOP8ucqy1gHDhzA/PnzMXjwYFRVVeGII47ICKTee+89nHzyyaiqqkJLSwsuuugi9Pb28vsTiQQWL16MxsZGtLS04D//8z+hnKiTTCaxdOlSHHLIIaiqqsLEiRMztokgCO9BwQ5BEJ7mqquuwrx58/Duu+9i/vz5OPfcc7Fx40bNx3744Yd49tlnsXHjRtx6660YNGgQAKCvrw+zZs1CU1MT3nrrLTzyyCN4/vnnsWjRIv783//+97jnnnvwt7/9Da+++io6Ojrw+OOPZ7zH0qVLce+99+K2227DBx98gCuuuALnnXceVq5cmb+dQBBEbjgyTpQgCMIGCxYsEP1+v1hTU5Px99vf/lYUxdTE94svvjjjOVOnThV/+MMfiqIoilu2bBEBiO+8844oiqI4d+5c8fvf/77qe91+++1iU1OT2Nvby2975plnRJ/PJ7a3t4uiKIpDhw4Vb7zxRn5/LBYThw8fLp5xxhmiKIpiOBwWq6urxddffz3jtRcuXCh+5zvfsb8jCILIK6TZIQjCVU466STceuutGbc1Nzfz/582bVrGfdOmTdPsvvrhD3+IefPm4e2338Ypp5yCM888E1/96lcBABs3bsTEiRNRU1PDH3/cccchmUxi06ZNqKysxK5duzB16lR+fyAQwJQpU3gpa/Pmzejv78fXv/71jPeNRqOYNGmS9Q9PEERBoGCHIAhXqampweGHH+7Ia82ZMweff/45/u///g8rVqzAjBkzcMkll+Cmm25y5PWZvueZZ57BQQcdlHGfWVE1QRCFhzQ7BEF4mjfeeCPr32PHjtV8/ODBg7FgwQLcd999+OMf/4jbb78dADB27Fi8++676Ovr44997bXX4PP5cOSRR6KhoQFDhw7FmjVr+P3xeBzr1q3j/x43bhxCoRC2bduGww8/PONvxIgRTn1kgiAchjI7BEG4SiQSQXt7e8ZtgUCAC4sfeeQRTJkyBdOnT8f999+PN998E3fddZfqa1199dWYPHkyxo8fj0gkgqeffpoHRvPnz8c111yDBQsW4Nprr8XevXtx6aWX4rvf/S5aW1sBAJdddhmuv/56HHHEERgzZgxuvvlmdHZ28tevq6vDT3/6U1xxxRVIJpOYPn06urq68Nprr6G+vh4LFizIwx4iCCJXKNghCMJVli9fjqFDh2bcduSRR+Kjjz4CAFx33XV48MEH8aMf/QhDhw7FP//5T4wbN071tYLBIJYsWYKtW7eiqqoKxx9/PB588EEAQHV1NZ577jlcdtllOPbYY1FdXY158+bh5ptv5s//yU9+gl27dmHBggXw+Xy44IILcNZZZ6Grq4s/5r/+678wePBgLF26FJ999hkaGxvxpS99Cb/4xS+c3jUEQTiEIIoKEwmCIAiPIAgCHn/8cRrXQBBETpBmhyAIgiCIkoaCHYIgCIIgShrS7BAE4Vmoyk4QhBNQZocgCIIgiJKGgh2CIAiCIEoaCnYIgiAIgihpKNghCIIgCKKkoWCHIAiCIIiShoIdgiAIgiBKGgp2CIIgCIIoaSjYIQiCIAiipKFghyAIgiCIkub/Azg9rsAAURXiAAAAAElFTkSuQmCC", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# Learning Curve KNN\n", "def plot_learning_curve(env, model):\n", "\n", " plt.plot(knn_scores)\n", " plt.xlabel('Episode')\n", " plt.ylabel('Total Score')\n", " plt.title('Learning Curve KNN')\n", " plt.show()\n", "\n", "plot_learning_curve(env, model)\n" ] }, { "cell_type": "code", "execution_count": 206, "metadata": { "id": "cUeQulqiyReP" }, "outputs": [], "source": [ "env.close()" ] }, { "cell_type": "code", "execution_count": 1048, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 472 }, "id": "sJ_y8vQeoCKj", "outputId": "0e5df421-5da4-4441-f5bb-e8d04a36c40b" }, "outputs": [ { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjcAAAHHCAYAAABDUnkqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAADWnUlEQVR4nOxdebwUxdU93T0z7z14rAqC7CqCIKBijKiIG8ENxUSTuCFG4xL3XVyCqAiaQNwRl7hFo9FEkk9FYoyouG8YFcUVQUUBZYf3Znq6vj+mq6d7prureu8Z6vx+JrzppWpqqqtu33PPvRIhhEBAQEBAQEBAoE4gJ90BAQEBAQEBAYEwIYwbAQEBAQEBgbqCMG4EBAQEBAQE6grCuBEQEBAQEBCoKwjjRkBAQEBAQKCuIIwbAQEBAQEBgbqCMG4EBAQEBAQE6grCuBEQEBAQEBCoKwjjRkBAQEBAQKCuIIwbgcQwb948SJKEefPm+br++++/x5FHHoktttgCkiThxhtvDLV/ArUPSZJw1VVXJd2NzRpvvPEGcrkcvvrqq6S7Eivuu+8+SJKExYsXG5/tvvvuuPjii5Pr1GYEYdwIcIE+qPS/TCaDHj16YMKECfjmm28S6dN5552HuXPnYuLEiXjwwQdx4IEHJtKPWsPChQtx1VVXWRZdN1x11VWQJAkrV66MtmMC3KAvBo8//rjl83w+j0MPPRSyLOPPf/4zgPKz29jYaPus7rPPPthxxx0tn/Xt2xeSJOGss87ibtsJl19+OY4++mj06dPH0qZ5PWlqasLQoUNx4403QtM0rvvWIi655BLcdttt+O6775LuSt0jk3QHBGoLV199Nfr164eWlha89tpruO+++zB//nx88MEHaGxs9HSvvffeG5s2bUIul/PVl//+9784/PDDceGFF/q6fnPFwoULMXnyZOyzzz7o27dv0t2JFJs2bUIms3ksc4VCAUceeSSefvpp3HXXXfjNb35jOd7a2opp06bhlltu4b7nXXfdhYkTJ2Lrrbf21acFCxbgP//5D1555ZWqYz179sTUqVMBACtXrsTDDz+M8847DytWrMCUKVN8tZd2HH744Wjfvj1uv/12XH311Ul3p64hPDcCnnDQQQfhuOOOw8knn4y7774bF154IT7//HP861//8nwvWZbR2NgIWfY3DZcvX46OHTv6utYOLS0tdf3WWOvw8/s0NjZuFsZNoVDAL3/5Szz55JOYNWsWTjrppKpzdtppJ9x111349ttvue45ePBgFItFTJs2zXe/7r33XvTu3Ru777571bEOHTrguOOOw3HHHYdzzz0XL774Ivr06YNbbrkFxWLRd5txQdM0tLS0eLpGlmUceeSReOCBByBqVkcLYdwIBMLIkSMBAJ9//rnl848//hhHHnkkOnfujMbGRuy6665VBpBdzA11kS9cuBD77rsv2rRpgx49euCGG24wzqFudkIIbrvtNsO1TfHFF1/gqKOOQufOndGmTRvsvvvueOqpp2zbfuSRR3DFFVegR48eaNOmDdauXQsAeP3113HwwQejU6dOaNu2LYYOHYqbbrrJ83ekfZ0/fz7OPvtsdOnSBR07dsSpp56KfD6P1atXY/z48ejUqRM6deqEiy++uGrR0zQNN954IwYPHozGxkZstdVWOPXUU7Fq1SrLeX379sWhhx6K+fPnY7fddkNjYyO22WYbPPDAA5b+HHXUUQCAfffd1xg7v3FPXsfjxx9/xIUXXoghQ4agubkZ7du3x0EHHYT33nvPcp7b7zNhwgQ0Nzfjm2++wbhx49Dc3IwuXbrgwgsvrNoUK2NuKMX22WefYcKECejYsSM6dOiAE088ERs3brRcu2nTJpx99tnYcsst0a5dOxx22GH45ptvmHE833//PTKZDCZPnlx1bNGiRZAkCbfeeiuAklEyefJk9O/fH42Njdhiiy2w11574dlnn+UZcgCAqqr49a9/jX/+85+YOXMmfvvb39qed9lll3kyVvr27Yvx48d7MogqMXv2bOy3336W59MJjY2N+MlPfoJ169Zh+fLllmN/+ctfMHz4cDQ1NaFz58749a9/jaVLlxrHb775ZiiKgtWrVxufTZ8+HZIk4fzzzzc+KxaLaNeuHS655BLjsz/+8Y/YY489sMUWW6CpqQnDhw+3pdwkScKZZ56Jhx56CIMHD0ZDQwOeeeYZAMCHH36I/fbbD01NTejZsyeuvfZaR0N89OjR+Oqrr7BgwQLmmAj4hzBuBAKBxm106tTJ+OzDDz/E7rvvjo8++giXXnoppk+fjrZt22LcuHF44oknmPdctWoVDjzwQAwbNgzTp0/HwIEDcckll2DOnDkASnTWgw8+CKC0UDz44IPG399//z322GMPzJ07F7/73e8wZcoUtLS04LDDDrNt+5prrsFTTz2FCy+8ENdddx1yuRyeffZZ7L333li4cCHOOeccTJ8+Hfvuuy+efPJJ39/xrLPOwqefforJkyfjsMMOw5133okrr7wSY8eORbFYxHXXXYe99toLf/jDH4zvQnHqqafioosuwp577ombbroJJ554Ih566CGMGTMGhULBcu5nn32GI488EqNHj8b06dPRqVMnTJgwAR9++KExdmeffTaA0mZHx26HHXZg/i5u4B2PL774ArNnz8ahhx6KGTNm4KKLLsL777+PUaNG2W6gdr8PUNqkxowZgy222AJ//OMfMWrUKEyfPh133nknV39/+ctfYt26dZg6dSp++ctf4r777qsyRiZMmIBbbrkFBx98MK6//no0NTXhkEMOYd57q622wqhRo/C3v/2t6tijjz4KRVEMA/Oqq67C5MmTse++++L
WW2/F5Zdfjt69e+Odd97h+h6qquLoo4/GE088gdtuuw2nnnqq47n9+vXzbKxcfvnlUFXVl/fmm2++wZIlS7DLLrtwX7N48WJIkmTxyE6ZMgXjx49H//79MWPGDJx77rl47rnnsPfeexvGzMiRI6FpGubPn29c99JLL0GWZbz00kvGZ++++y7Wr1+Pvffe2/jspptuws4774yrr74a1113HTKZDI466qiqFyKgRIWfd955+NWvfoWbbroJffv2xXfffYd9990XCxYswKWXXopzzz0XDzzwQNXLEMXw4cMBAC+//DL3uAj4ABEQ4MC9995LAJD//Oc/ZMWKFWTp0qXk8ccfJ126dCENDQ1k6dKlxrn7778/GTJkCGlpaTE+0zSN7LHHHqR///7GZ88//zwBQJ5//nnjs1GjRhEA5IEHHjA+a21tJd26dSO/+MUvLH0CQM444wzLZ+eeey4BQF566SXjs3Xr1pF+/fqRvn37kmKxaGl7m222IRs3bjTOVVWV9OvXj/Tp04esWrXKcm9N0zx/RzpuY8aMsVw/YsQIIkkSOe200yxt9+zZk4waNcr47KWXXiIAyEMPPWTpyzPPPFP1eZ8+fQgA8uKLLxqfLV++nDQ0NJALLrjA+Oyxxx6rGnc3TJo0iQAgK1ascDyHdzxaWlqM34Diyy+/JA0NDeTqq682PnP6fQgh5IQTTiAALOcTQsjOO+9Mhg8fbvkMAJk0aVLVd/nNb35jOe+II44gW2yxhfH322+/TQCQc88913LehAkTqu5ph1mzZhEA5P3337d8PmjQILLffvsZfw8bNowccsghrveyAx0f+pvfdtttjufSOfjmm2+Szz//nGQyGXL22Wcbx0eNGkUGDx5suaZPnz5Gv0488UTS2NhIvv32W0vbjz32mGsf//Of/xAA5P/+7/+qjo0aNYoMHDiQrFixgqxYsYJ8/PHH5KKLLiIALOOxePFioigKmTJliuX6999/n2QyGePzYrFI2rdvTy6++GJCSGnubbHFFuSoo44iiqKQdevWEUIImTFjBpFl2fJsV86vfD5PdtxxR8vvREhpLsmyTD788EPL53TNef31143Pli9fTjp06EAAkC+//LLq++dyOXL66ac7DZ1ACBCeGwFPOOCAA9ClSxf06tULRx55JNq2bYt//etf6NmzJ4AS7fDf//7XeDNeuXIlVq5ciR9++AFjxozBp59+ylRXNTc347jjjjP+zuVy2G233fDFF18w+/f0009jt912w1577WW53ymnnILFixdj4cKFlvNPOOEENDU1GX+/++67+PLLL3HuuedWxfNQ17qf73jSSSdZXPM//elPQQixxEYoioJdd93V8j0fe+wxdOjQAaNHjzbaWblyJYYPH47m5mY8//zzlnYGDRpkUIUA0KVLFwwYMIBr7PzCy3g0NDQYMVbFYhE//PADmpubMWDAAFtvReXvY8Zpp51m+XvkyJHc39Pu2h9++MGgJSnd8Lvf/c5ynp16yA4///nPkclk8OijjxqfffDBB1i4cCF+9atfGZ917NgRH374IT799FOu+1aCUmD9+vXjOn+bbbbB8ccfjzvvvBPLli3juuaKK67w5b354YcfAFi9umZ8/PHH6NKlC7p06YKBAwfiD3/4Aw477DDcd999xjn/+Mc/oGkafvnLX1rmf7du3dC/f39j/suyjD322AMvvvgiAOCjjz7CDz/8gEsvvRSEELz66qsASt6cHXfc0fJsm+fXqlWrsGbNGowcOdJ2Po4aNQqDBg2yfPb0009j9913x2677WZ81qVLFxx77LGOY9OpUyehPowYwrgR8ITbbrsNzz77LB5//HEcfPDBWLlyJRoaGozjn332GQghuPLKK42Fi/43adIkAKji0yvRs2fPKo6+U6dOVTEmdvjqq68wYMCAqs8p7VKZa6NyU6CxQ5XSWDP8fMfevXtb/u7QoQMAoFevXlWfm7/np59+ijVr1qBr165Vba1fv57ZDsA/dn7hZTw0TcOf/vQn9O/fHw0NDdhyyy3RpUsX/O9//8OaNWuq7u20aTc2NqJLly6Wz7x8z8pxohswvf6rr76CLMtV7W+33XZc999yyy2x//77W6ipRx99FJlMBj//+c+Nz66++mqsXr0a22+/PYYMGYKLLroI//vf/7jaAIAbbrgBvXv3xpFHHslNc3g1VvwYRGYQh8DZvn374tlnn8XcuXNx++23o0ePHlixYoVFdfnpp5+CEIL+/ftXza2PPvrIMv9HjhyJt99+G5s2bcJLL72E7t27Y5dddsGwYcMMamr+/PkW4x8AnnzySey+++5obGxE586d0aVLF8ycOZN7Pn711Vfo379/1ed265B5THjikAT8o/5lBAKhYrfddsOuu+4KABg3bhz22msvHHPMMVi0aBGam5uNILoLL7wQY8aMsb0Ha4NQFMX2c6dFMgicvAJu8PMdnb6T3efm76lpGrp27YqHHnrI9vrKDT7OsaPwMh7XXXcdrrzySvzmN7/BNddcg86dO0OWZZx77rm2AZhOv4/T9+RFHOP061//GieeeCIWLFiAnXbaCX/729+w//77Y8sttzTO2XvvvfH555/jn//8J/7973/j7rvvxp/+9CfccccdOPnkk5ltdO/eHc8++yz22msvHHLIIXjhhRcwbNgw12u22WYbHHfccbjzzjtx6aWXcn2Xyy+/HA8++CCuv/56jBs3juuaLbbYAgAcDc62bdvigAMOMP7ec889scsuu+Cyyy7DzTffDKA0tyRJwpw5c2x/s+bmZuPfe+21FwqFAl599VW89NJLhhEzcuRIvPTSS/j444+xYsUKi3Hz0ksv4bDDDsPee++N22+/Hd27d0c2m8W9996Lhx9+uKo9P+uFHVavXm2ZBwLhQxg3Ar6hKAqmTp1qBENeeuml2GabbQAA2WzWsnDFhT59+mDRokVVn3/88cfGcTdsu+22AEoUglP/4/yO2267Lf7zn/9gzz33DG1hDfuN0ct4PP7449h3331xzz33WD5P22Lfp08faJqGL7/80vJW/tlnn3HfY9y4cTj11FMNauqTTz7BxIkTq87r3LkzTjzxRJx44olGsOtVV13FZdwApfGfO3cuRo0ahTFjxuCll16y9SSYccUVV+Avf/kLrr/+eq42tt12Wxx33HGYNWsWfvrTn3JdM3DgQADAl19+yXX+0KFDjTYuvPBC9O7dG9tuuy0IIejXrx+233571+t322035HI5vPTSS3jppZdw0UUXASgZkHfddReee+4542+Kv//972hsbMTcuXMtHuh7772Xq89Aaa7Y0Yp26xBQCrTO5/OBg/gF3CFoKYFA2GeffbDbbrvhxhtvREtLC7p27Yp99tkHs2bNsnVhr1ixItL+HHzwwXjjjTcMjh0ANmzYgDvvvBN9+/at4ssrscsuu6Bfv3648cYbLbJSoPxWH+d3/OUvf4lisYhrrrmm6piqqlV95EHbtm0BwNe1dvAyHoqiVHlHHnvsscSyXDuBeqBuv/12y+deEuB17NgRY8aMwd/+9jc88sgjyOVyVV
gN+x2zlz5gymTJmCGTNmwOVyobOzEwDzCuerr76C3W7H008/jfvvvx+5ubn4+OOPlX6tz/0sbobgt99+w8DAAKxWq6rdarUiGAxqNKu7w9/5MLvwQqEQXnzxReTn5yM7OxvAX5kZDAZYLBbV2PGeWWtrK+69914YjUaUlpairq4OWVlZzOs2fD4fjh8/Do/HM6iPmQ3mcDhQW1uLXbt2wev1wu/3Y86cOeju7mZeYZw9exZerxdpaWnYvXs3ysrK8MILL2Dz5s0AtD/3j7unghONJeXl5Th16pTq3j6Fl56ejpaWFnR1dWH79u1wu91oaGjQelpj0q+//orKykrU19djwoQJWk/nrlBcXKy8fvDBB+FwOJCcnIwvvvgCJpNJw5mNTaFQCHa7HW+//TYAIDc3F6dOncKmTZvgdrs1nh2v3AxJfHw8oqOjB62Mv3DhAhITEzWa1d3h73yY3WAVFRXYsWMH9u3bh2nTpintiYmJ6O/vx9WrV1Xjx3tmBoMBqampyMvLg8fjQU5ODj744APmFUZTUxMuXryIWbNmQa/XQ6/Xo6GhAR9++CH0ej2sVisz+x8sFgseeOABdHR08DsWhs1mQ1ZWlqotMzNTuZWn9bmfxc0QGAwG5OXlYc+ePUpbKBTCnj174HQ6NZzZ2JeSkoLExERVdr///juOHDkybrMTEVRUVKCurg579+5FSkqKqj8vLw/33HOPKrP29nZ0dnaO28zCCYVC6OvrY15hFBYWorW1FS0tLcpmt9vhcrmU18zsn/X09OCXX36BzWbjdyyM/Pz8Qf/C4ueff0ZycjKAMXDuH/ElyxHC5/OJ0WiU2tpaOX36tKxYsUIsFosEg0Gtp6a57u5uaW5ulubmZgEgVVVV0tzcLOfPnxcRkY0bN4rFYpEvv/xSTp48KfPnz5eUlBS5fv26xjPXRllZmcTGxsr+/fslEAgo27Vr15QxpaWlkpSUJHv37pVjx46J0+kUp9Op4ay1tWbNGmloaBC/3y8nT56UNWvWiE6nk++++05EmNdQ/PevpUSY2a1Wr14t+/fvF7/fL42NjVJUVCTx8fFy8eJFEWFetzp69Kjo9XrZsGGDnDlzRrZu3Spms1m2bNmijNHy3M/i5g589NFHkpSUJAaDQR566CE5fPiw1lMaE/bt2ycABm1ut1tE/vpJ4Nq1a8VqtYrRaJTCwkJpb2/XdtIaCpcVAKmpqVHGXL9+XVauXClxcXFiNptl4cKFEggEtJu0xpYtWybJycliMBgkISFBCgsLlcJGhHkNxa3FDTNTW7RokdhsNjEYDDJ16lRZtGiRdHR0KP3Ma7Cvv/5asrOzxWg0SkZGhlRXV6v6tTz360RERv76EBEREdHo4JobIiIiiigsboiIiCiisLghIiKiiMLihoiIiCIKixsiIiKKKCxuiIiIKKKwuCEiIqKIwuKGiO4K586dg06nQ0tLy4h9xpIlS7BgwYIROz4RjQ4WN0Q0KpYsWQKdTjdomzdv3pD2nz59OgKBALKzs0d4pkR0t9NrPQEiGj/mzZuHmpoaVZvRaBzSvtHR0eP2CcxEdGd45YaIRo3RaERiYqJqi4uLAwDodDp4vV4UFxfDZDJhxowZ2L59u7Lvrbelrly5ApfLhYSEBJhMJqSlpakKp9bWVjz22GMwmUyYPHkyVqxYgZ6eHqV/YGAAL730EiwWCyZPnoxXXnkFtz6NJhQKwePxICUlBSaTCTk5Oao5EdHYxOKGiMaMtWvXoqSkBCdOnIDL5cIzzzyDtra22449ffo0vv32W7S1tcHr9SI+Ph4A0Nvbi7lz5yIuLg4//PADtm3bhu+//x4VFRXK/u+99x5qa2vx6aef4sCBA7h8+TLq6upUn+HxePDZZ59h06ZN+PHHH7Fq1SosXrwYDQ0NIxcCEf17o/J4TiIa99xut0RHR0tMTIxq27Bhg4j89bT00tJS1T4Oh0PKyspERMTv9wsAaW5uFhGRp556SpYuXRr2s6qrqyUuLk56enqUtp07d0pUVJQEg0EREbHZbPLOO+8o/Tdv3pRp06bJ/PnzRUTkxo0bYjab5eDBg6pjL1++XJ599tn/PwgiGnFcc0NEo+bRRx+F1+tVtU2aNEl57XQ6VX1Op/O2v44qKytDSUkJjh8/jieeeAILFizAww8/DABoa2tDTk4OYmJilPH5+fkIhUJob2/HhAkTEAgE4HA4lH69Xg+73a7cmuro6MC1a9fw+OOPqz63v78fubm5d/7HE9GoYXFDRKMmJiYGqampw3Ks4uJinD9/Ht988w3q6+tRWFiI8vJyvPvuu8Ny/L/X5+zcuRNTp05V9Q11ETQRaYNrbohozDh8+PCg95mZmbcdn5CQALfbjS1btuD9999HdXU1ACAzMxMnTpxAb2+vMraxsRFRUVFIT09HbGwsbDYbjhw5ovT/8ccfaGpqUt5nZWXBaDSis7MTqampqm369OnD9ScT0QjglRsiGjV9fX0IBoOqNr1erywE3rZtG+x2OwoKCrB161YcPXoUn3zySdhjvfHGG8jLy8PMmTPR19eHHTt2KIWQy+XCunXr4Ha78eabb+LSpUt4/vnn8dxzz8FqtQIAKisrsXHjRqSlpSEjIwNVVVW4evWqcvyJEyfi5ZdfxqpVqxAKhVBQUICuri40Njbivvvug9vtHoGEiGg4sLgholGza9cu2Gw2VVt6ejp++uknAMD69evh8/mwcuVK2Gw2fP7558jKygp7LIPBgNdeew3nzp2DyWTCnDlz4PP5AABmsxm7d+9GZWUlZs+eDbPZjJKSElRVVSn7r169GoFAAG63G1FRUVi2bBkWLlyIrq4uZcxbb72FhIQEeDwenD17FhaLBbNmzcLrr78+3NEQ0TDSidzyjx2IiDSg0+lQV1fHxx8Q0b/GNTdEREQUUVjcEBERUUThmhsiGhN4h5yIhguv3BAREVFEYXFDREREEYXFDREREUUUFjdEREQUUVjcEBERUURhcUNEREQRhcUNERERRRQWN0RERBRRWNwQERFRRPkTF+cdKbAPPKsAAAAASUVORK5CYII=", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# Compare Reward RL & KNN\n", "plt.plot(range(1, episodes + 1), rl_rewards, label='Reinforcement Learning')\n", "plt.plot(range(1, episodes + 1), knn_rewards, label='KNN')\n", "plt.xlabel('Episode')\n", "plt.ylabel('Reward')\n", "plt.title('Reinforcement Learning vs KNN (Reward)')\n", "plt.legend()\n", "plt.show()\n" ] }, { "cell_type": "code", "execution_count": 1049, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 472 }, "id": "ZuRyZTvfZnNL", "outputId": "8efa9557-6b1e-4628-fbbc-6371b74d369a" }, "outputs": [ { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjsAAAHHCAYAAABZbpmkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOx9d7gU5f39me3tdm6hN1HEICCKIipgw97QNI2iRs0vlqjRRGMS2zeixhqDJcYek1iiaOwlir2hYEGR3rkXbtt2t878/ph535nZnbrl7sJ9z/PwALuzu7M77cz5nM/5cIIgCGBgYGBgYGBg2EnhqPQKMDAwMDAwMDCUE4zsMDAwMDAwMOzUYGSHgYGBgYGBYacGIzsMDAwMDAwMOzUY2WFgYGBgYGDYqcHIDgMDAwMDA8NODUZ2GBgYGBgYGHZqMLLDwMDAwMDAsFODkR0GBgYGBgaGnRqM7DDsMHj77bfBcRzefvvtgl7f3t6Ok08+GU1NTeA4DnfccUdJ149hxwfHcbjmmmsqvRoDGp988gk8Hg/WrVtX6VUpGq+88gpCoRC2bdtW6VUZ8GBkh6EsePjhh8FxHP3jcrkwdOhQzJs3D5s2barIOl1yySV49dVXceWVV+Kxxx7DEUccUZH12NGwbNkyXHPNNVi7dq2l5a+55hpwHIft27eXd8UYLIPcKDz99NOqx1OpFI455hg4HA48+OCDAORj1+fzaR6rs2bNwg9+8APVY6NGjQLHcbjwwgstf7YerrrqKvzkJz/ByJEj6WM8z+PRRx/Fvvvui8bGRtTU1GDXXXfF6aefjo8++sjS+1YCRxxxBHbZZRfMnz+/0qsy4OGq9Aow7Ny47rrrMHr0aCQSCXz00Ud4+OGH8d577+Hrr7+Gz+ez9V4HHXQQ+vr64PF4ClqX//3vfzj++ONx2WWXFfT6gYply5bh2muvxaxZszBq1KhKr05Z0dfXB5drYJwW0+k0Tj75ZLz00ku4//77cdZZZ6meTyaTuPHGG3HXXXdZfs/7778fV155JYYMGVLQOi1ZsgRvvPEGPvjgA9XjF110ERYsWIDjjz8ep556KlwuF5YvX46XX34ZY8aMwX777VfQ5/UHzjvvPFx22WW49tprUVNTU+nVGbBgyg5DWXHkkUfitNNOw89//nP8/e9/x2WXXYZVq1bh+eeft/1eDocDPp8PDkdhu21HRwfq6+sLeq0WEokEeJ4v2fsxlBaFbB+fzzcgyE46ncYPf/hDvPDCC7jvvvtw9tln5y0zefJk3H///di8ebOl99xjjz2QzWZx4403FrxeDz30EEaMGKEiL+3t7bj77rtxzjnnYOHChbjooovwy1/+EnfeeSeWL1+OX/7ylwV/nl1kMhmkUilbr5k7dy6SySSeeuqpMq0VgxUwssPQrzjwwAMBAKtWrVI9/t133+Hkk09GY2MjfD4f9t577zxCpOXZIZL6smXLMHv2bAQCAQwdOhQ333wzXYbI8oIgYMGCBbS0RrB69WqccsopaGxsRCAQwH777YcXX3xR87P//e9/4/e//z2GDh2KQCCAcDgMAPj4449x1FFHoaGhAcFgEHvuuSfuvPNO29+RrOt7772Hiy66CM3Nzaivr8d5552HVCqFnp4enH766WhoaEBDQwN+85vfQBAE1XvwPI877rgDe+yxB3w+H1pbW3Heeeehu7tbtdyoUaNwzDHH4L333sO0adPg8/kwZswYPProo6r1OeWUUwAAs2fPpr9dob4pu79HV1cXLrvsMkycOBGhUAi1tbU48sgjsXTpUtVyRttn3rx5CIVC2LRpE0444QSEQiE0NzfjsssuQzabVb1PrmeHlORWrlyJefPmob6+HnV1dTjzzDMRj8dVr+3r68NFF12EQYMGoaamBscddxw2bdpk6gNqb2+Hy+XCtddem/fc8uXLwXEc/vrXvwIQScq1116LcePGwefzoampCQcccABef/11Kz85APGC/eMf/xjPPfcc7rnnHpxzzjmay/3ud7+zRV5GjRqF008/3RZBysXChQtx8MEHq47PNWvWQBAEzJgxI295juPQ0tKieqynpweXXHIJRo0aBa/Xi2HDhuH0009XlVU7Ojpw9tlno7W1FT6fD5MmTcIjjzyiep+1a9eC4zjccsstuOOOOzB27Fh4vV4sW7YMgLX9FwBaWlqw55574rnnnivoN2EoDRjZYehXEN9HQ0MDfeybb77Bfvvth2+//RZXXHEFbr31VgSDQZxwwgl49tlnTd+zu7sbRxxxBCZNmoRbb70V48ePx29/+1u8/PLLAMTy12OPPQYAOOyww/DYY4/R/7e3t2P//ffHq6++il/+8pf405/+hEQigeOOO07zs6+//nq8+OKLuOyyy3DDDTfA4/Hg9ddfx0EHHYRly5bhV7/6FW699VbMnj0bL7zwQsHf8cILL8SKFStw7bXX4rjjjsPf/vY3/OEPf8Cxxx6LbDaLG264AQcccAD+/Oc/0+9CcN555+Hyyy/HjBkzcOedd+LMM8/E448/jjlz5iCdTquWXblyJU4++WQcdthhuPXWW9HQ0IB58+bhm2++ob/dRRddBEC8+JHfbvfddzfdLkaw+nusXr0aCxcuxDHHHIPbbrsNl19+Ob766ivMnDlT84KqtX0AIJvNYs6cOWhqasItt9yCmTNn4tZbb8Xf/vY3S+v7wx/+EJFIBPPnz8cPf/hDPPzww3nkZN68ebjrrrtw1FFH4aabboLf78fRRx9t+t6tra2YOXMmnnzyybznnnjiCTidTko4r7nmGlx77bWYPXs2/vrXv+Kqq67CiBEj8Pnnn1v6HplMBj/5yU/w7LPPYsGCBTjvvPN0lx09erRt8nLVVVchk8kUpO5s2rQJ69evx1577aV6nHh3nnrqqTyCmYtoNIoDDzwQ
AUL0oydQag3m9nUKN3c9m7UHINCId1UpCWyo3WeIUS0kNlYgMnIiCLHRZCbwIC7CINyThlrO1V2KmdQbgrpe3ZWk2nnTYHcl+2QZMfWkTRo0CC888476O3tRSgUgjMn6fKpp55CKKRvhhpoICFrTgdHTzIqUFOtxlDHXNhRIshB6PIBbm1ZNheijJ8usIxVuW4sejGI2TQol2g21kRuDbh0XAwyzLn4ErLTpiA7nHTS9SOJT9Z0Ya8RDfaUHb2MHQJKdhTzsch39IR0py+rUIBBmaYn51wcKdkxKWN1Sd1YJHbeELVDxLZvq6GXtIylSFFWdGN5XA44tEa5GMGOZ4cmKFs7tpRdWyVXdqyMjOiRyU6DVtu5EsFBYjkxGQa61+hvE+lGKO0UL5xG3Vh5GTkWYezZkfbLrE75zsygrLy5ospOl631y01Q7owRZacMZaxMSs4OMjAoNwVlz47Q1w3lUUCUndGDcl6fzciZXztrGYugrq4uj+gAQGNjo0rpGeggF4CGgEf7ZErbzm2Usax4TMj7mpmeFbAU7a+HCiQoq04+maRM8CwrO8Wts9PBwetyYJpDms80cobYpaPAVsnc11qnOIG6JSWBS+GTNeLJkgYKFpOeTBBQKDuk40pL2TH07NhXvXLTkwnMy1iKIaAuh7Wo+T1/JAYm7nak6aIA6N1nixbZEQLwFaIkUGVno2mCsF1lh3T+kH2slLDUjaVUdszIDscBjWPEfxuVsvLITumVnZoyKju0bO51AU3jxAe3r7BVysrvxiIG5TKOigBMQwWpZyeeW8YSw09H5Uw7R7Rd9A86XNZtA1WAMiQZMRAQ5q67M1vN2AHsGZSttCjngBg0Exp3XKaoiEFZYeAkCpnVURFASdSooNelIDv75z3fHpEMyjWKE6jk2fEjiU/XdiHLC/TkbGniuVVlJ5uUvxvpyvDUUGUnbKTs0HERYePPUiA3PZnAlEQrlB3LXo19zwV+tVS+yJpBa2SEYuK5vxD1pHao6B3KJrUHryogB1BmjVv+JSgDBUs9Z6jeL24fq8pOvVZ6ci6adhH/JrPKtCCdGzIS2ddOUCbKTnHdWMV5dvQMyopurPrhIuERssDad62vH5nflWdQLuOoCHfA0OMX8DgRd0rHe6JbvkGCooyll55cM1gcHL2DgJGdMsKwEwtQkB0rZSwbBmWzsQIaMC03GKECZSw6F8vrVI/csHpxKDJnBwBCbmAfx3LxP1pkR1J22jSUnQZ3BpFEBt9tDVtLT7bq2fEE6WfQ/cu2Z8f+b5ObnkxgmM6ditPJ6t1mQ0CLgTQ8dBB6kUpLFzvFxHPbnVgA4HTLx5eJb4coO1le0Czf5CKvRFtCUIOyoWdHTk/WnIuViybJt9NlpOyI+1LWJZ7HDEMFC/bsqMmECiVSdmhEwdiDxb9X/c/G+slkLJ6SM7jKYlCmnVjGthKO4+AKihUAjs/Q7RRJpKnylJ+xQzqxdhy/DsDITlkhT7Q1SU+2Unqx46OwMlYgB8UZlCvXjSUqOzZ+RwJlgrKFu20tTHBtRC0XR8YdAlonqp7L8gK2RUnruZLsiMrObk3iifmTNV10Vk7R6ckEub4dpWenXN1YOsTekERLngfe4UYU/oK7cEwRHIQsHHByAgSiAiqUHdvmZALq21lnuJgyBdlKR1a5OrEAReu5ZWXHQpQIMSl3rtZfRtoHeZdRGUt8rFhlx9CzU7SyI23LsbPFv22QHbmMlaaqjtflKOlIEAoLnVgEwWAISUHazpJJeV2neBMyKOTJvwnbAc3JACM7ZQXpUNGfi2WnjGXDs1NEGWtHydnRVHbskB3i2RF4IN1X0DpMFb4FAPQ0TcmTijujSWR5AU4Hp5apJcP4rk3i7/3Jmi554rlhGcvGNlX6doACPDsFdGPFdciOEYmWSlgZbwMArnzKjsOJLkcjAICLbBHJrVLZKfRiQ3w7JiZlpffGysgIskw5lZ2ePh2jLs/TO3fLZMeSsiOeG3h3+ZSd4jw7ZqGCOQR01AGiZ6V7rZyzZbZ+0rGXzPA0rG9QyFvyUqX4IeadWARNNd68yedr9MZEADtkxg7AyE5ZIXeoaJAdQbCXDVNmz05RZawKGJRJoqmo7JCJ5zbIjltxEBeoSE3ixTlxHY1T854jGTvNIS+cSnO6VGIaWyceeqKyY9KNxfOKMpYVZScna0fDs1Pqbiyq7OR6doxazyWyk/aKPquykR0AXQ5RqndE28VjSBDXJwJ/YQZlAKiXsnYstJ+LxEVAX8z8N1WNJigVUnFAEGg3ViLNaxPQaDuQTSEjOLAVjdY8O8Q7Fdmifw5IkZElEtnJ8ewIgkBjAQruxqLjIjT2baf0PcwGgTp1yE7uNvHWAMP3Ff+96i1L66ckr+ukAZvlCxSUPDsWyE6jRrCg7pgIgCk7DPkgpk3N1sJED5CRFIVSkx3i2bFyYZTgJ6GCRZWx+tGgnFTcadmdeA6InVMFjEWgEASMT34NANhYu1fe07QTqzbn5CmVsQYHBXhdDnTGUvhyYw8AgzJWfLt0ceasqVe5WTt2PTseBdkx6TQioMQ+pGdQ1ngfqRMrRchOucpYAHqdItlxxbZQVYfnXOiDtzDPDmCr/TzgceJ+960Y/ehe6nRrDVBlp1TpydtXAjePBp6/ECGPi6Zaa+4DEnHbikZk4TRvPQeAQKOYAg/oqxzSuUGgyo76PJPOCrSaXLBnh5axtGZjFansaA1mtVnKcjsd9HhYK5GdsgUKpqyXsQaFvHnBgmv1zMkAIzsM+RAEAS4Hp13G+lIamNo0Ti4DGYGWiixcmGmbsh2yUwrPTuH+F7uIKU8+dkdFEBTjNdr+PWr5HiQENzb6d8t7mnZi1eZ4ACRlx5VNYMqIenFZaayErrJDlLrgINEYawZLnh0LBmXla01g1npOygAqSMpO0i2SHXcZlZ0et0iEPfF2SnZSrhoAnP25WAQ2ggWb3Ukc4vgCrlQEaP/GcNloqQ3KW5aIF/Ov/wMHnzJOUe6Rzckel8P6b2NWyiI3QhKRzi1jpRSDYgv27BiWsYhnR6d8R/J3dDw7hICqOveISXnNO2L2jJV1lI5x0tZd/lER5rl3jUEPenOCBdd0GpEdVsZiyMHfz9gHK/50JA6bkEM6eB746B7x3/ueZ62DyGqCcjYj39Hb8ewU1Y0lHRACr3/nVGLElYMSo4puLDsopvy27n0AwOf8OEQz+ReEdqrs5JIdUdlBOoFpo9U5SLqzseyUsABDz46l1nOXV2zjByypXol0lt755s7QIRcHzdlYlOzUi6tXRmUn4hZ/ayXZSbrE7W97VAQBHRmxwZTk78Uth4OTlol3Gi5LVMtgqYaAkmTcdBzY8Ilx1o5E3DYKg1Dvd1v3k9D2c2Oy4/CK54rcmyrlYNBCFb4aS4NAzZQdHYNyWqO0OHiyGHWRDAObFltbR4mQkTJRJUdFEGjNx9L17PC87uDjagcjO2UGx3FqzwYArHhVTBv11QGTfmLtjQihyKb0704ASeUQAM4pX/QswDT8zcq6Af3m21ENSowWYFAGilN21onzsD4R
xmuGxdH05Lqck6eLkJ049hvdqHpKt4xFcy0skp08zw5RdmqslbE4zpZJmZRr3U6OnswJDI3v0kW/zy0m0pbTsxP1iPuGN9FByU5CyhcpvIw1TPw7FTGdgD2ZXyb/x4TsxLRKJsVAOQZg1f9QJ/lwNNvPVenJNlQH2pGlR3bEfZCT9is9Zcft5OynWUsI0UGgWWT5HPJpOvVcv/U8leGRzorvpyI7DicwZpb4b4ulLHL8yZ6dyo2KIGgKqedj9cRTdJxIXqBgfDvApyGW1G0q6RUGIzuVwEd3i3/vdYZhlLcKSjnS6OJMLoyh1rxEXyMURXYcTjnbpZ/az1XtuYWWsQqdjyUIwFpR2fmY311TDdMaAgpAoez0YcqIBjqFHDAiOxbTkwmUnh1BUNTvQ/Su3tCgDNgyKXcr2s5zlQB5v9Ly7Ehkx1UPoNxkRySA/j5Z2elzEmWnQLLjCcg3FCa+nQmpr+T/xI3HDMQVoYIlQS7ZMSpj0fTkZmudWARNkklZr4wl3QQ5dMgOMSwXak4G1N2MeTcgplPP9ZUd5fGdF0BpM2+HrCMhtOU3KJtfX5qCXtV8LKLqtNZ68wk3KWGFWq2V1KsIjOz0N9q/EWu8nBOYdq711zndcqeAUSnL7oVRAi03FEJ2gH5vP6fKjisrj4qwW8YqVNnpWQdENiPLufAFvwvtnlFCHgKqbVBGOg6/x4k9h8mDWnVDBa2mJxMoPTvpuFheBACPbFDuS2fp1HVNeGvFvy2kKHdSspN/lxowKo9KZCfqkpSdMpax+rzivhFIbqP7S5wjE8+LIBVWfDupOEYll8v/N1N2SOdPqTw7SrKzZSmGusVjVLOMZTdjh8BU2RE/0+mXyE7OeYYoO8UQXq/LSfehvFJWEcoO8Zu5HFz+PjpGMilv+szS6IjcG5oms0GrhYLe4NSaLtoY9KjmY6019OvsmOZkgJGd/gfx6ux+rHyitAorKcp2L4wSfMXMxgL6NVhQEAR651aTlu6S7YyKICjUsyOVsDrr9kACXs3sFGI6btMxKJNsH6VvR9ejYTU9mUDp2aHfjQM8QdXJ1lDdkbZnNmFOdkieVGMw/+JoSKIlhSPmFMlOOQ3KfX7xt/Nmo+IQUQAxR5GeHcBaR9bGT+GE4vubeXbKqexAwOTMUgAayo4gqAzKtspYxKAc355/0eeztPPU5RMVw1RuGavIUREEoZxhmxRFKDs0PVlrfAcdHcFbGh2R68sbVFNmg7LVMpbk2cnGurBmGyM7DMUitl3uwtrv/9l/vRWTst0Lo4SiylhAv5KdZIYHKckHCNmxMyqCoNB1lszJXYP2BpCfiptIZ+mFpEWvjCWdXPeVfDt+txMuPWXDbm4SUXb4tCw7e0IAx8HllBNbrYyMmP/sp8adW9CfeA7IJFozTE8iOxFH+ZUdwVOLqCBti22iyhItibJjIWtHIscJklJbKc9OwygAwIT4ZwCA3ngqf7m0eG7ZLDRZy9gh8NbI55zcUpbifEXITl4ZS2pFL7aUqRzJoAKdep7UNpNTZSf/O6vmYmnBRimr35QdG91YAY8LfZJ/jY93Y42UnrwzBQoCjOz0Lz57SDzYhkyRA6nswEouTAGBgoCJt8IK+jFYMKa4a/MmpQuHXXMyUFBSMAB68Yq0TAOQfyEnJSy/24na3HZyquyIJ5T9xjRh8vB6nDDF4E6JblOLap3bL+8r3WvFvxW1eysjI5JO8USX6Qvjs7XdussB+m3ngMF+JQj0oh/hym9Q9rgcaBck5a9DTL6OIKBax4JAlZ31+stI5Pgdfk/x/33Gnp0Y7cYqEdkhHqEfzAUAjA5/AkDIV3ak7xB2NiIJj70yFqA/NoKQHc4Jj08k+7k5O8UOASXQbT9Xlqe0SlkWlB1d8mmD7CjjJTgO1nKMCoGNbiwAQEA6NuJdNGNnZwoUBBjZ6T9kUsCnfxf/vd8v7asQgLXwvkLJjlGLsBX0o2dHOZTPWcioCAJleJ5VhLeIwWmcA4nB+wBAnmdnq2IAaJ7sLY2LIGUsv8eJhefPwPyT9tT+PFV6sg21jqg73WvEvxV3eFZGRnRlROISQh++WG9Mdrp0Jp4DBmWsVEwk/gDCDtFXUOyFzggqsiMRwLBAlJ0iPtfMs5NJAhs/BQC8khX3FzODcqxcree7HQ04vQgl2zGW25zv2ZG+wzaneCzZvhDrmZRpxk4IXrc0MiGdq+wU79kBDOZjKUlMNofsZDOyr03Ls0MydvRI8agDxDK6hdERSrLTGPDoq7nF4v+39+ZxctR1/v+r+u7pmeme+0gySZBALhKGBGIgrmCiHBpOD/gFjCsrCxIFwQP2txz7Uzesq/4EZBPR1bhf0Sj+DKsoYCCSyBXIBSGEIzC5M5lMJnP1TN/1++NTnzq6q7qru6vPeT8fj3nMTF9TXVP9qXe93q/3+y2nsTJ7dgDAXsMUZnt4ED39bD08LW2wQ8oOYcRbT7DOxrXtwOwrc3sNM56dHLonA3nOxgKKmsYaU8vK8sTzfJSdLLb5IFN10H4WPHXs5Jm8z3glVmudjkSdpOxkZHxAKvVEdsEO9+3oKTvyyAjjYOd4mD2mVghh56HBtH/KqHsyoO6gnHRc8VSOw4sxsOcVUtlxO2w4Du7pYmmMIZH9L/JKY2Xy7BzdCcRCCDkb8JooNZ/M6NmxMI0likqwU98BTF0MAPgH2xs6yo7UPVlgx47fm6WfxKjXjjyuxCcHtCml57Kyk1+Ap8zHSnpvdtV7SVZ21D6etMqOwba5a1WjI9KrO3wyO2DQWd8q+D43We3rrGPeQXsiglh4DIIATGmsSX1ghU48B0oc7GzZsgXLly9HZ2cnBEHAE088YfjYm2++GYIg4Ec/+pHm9oGBAaxYsQL19fUIBAK48cYbMTpavBlNphBFpdz8vH/SzQubwm0ioMg7jVX+wU5Q01Awh4nnnFy2WSo5x9QL5H0WTJLM+7g5ObnHDqApPTcF/3/WNGV33PDKtIE0ys64cRrrcJC9t1qMYdfBQSSS+5aoSJvGMpqNxU/4NU3yia6Qnh2X3Ybjorav0akE+19YUo011s/mTyUjpbBONi/AgChdZUfH9B8roQwCtUDZCY/Ic8DgbZBTLh+x7U7tsyMpO4cSLNjJWtmR01j7tLfLyo4S7ETiCc0xFbboGKj1GHh2BMG4saA6+NGZjSUHO+nSivLoiPRzstTl8QXz64hiVgZlAKipDSAmsn0fwCg6/d7Uz4UoUhorV4LBIObPn49HHnkk7eM2bNiAV155BZ2dqTt4xYoV2LNnDzZu3Ignn3wSW7ZswU03ZVHSXQwObWVXeA4PsOAfc3+dTGmsRDyn7slAhaWx1A0F80lj5aLsSH4dTD1f9lQkG5S5spNSiQVoOiibYiQ3pQ4+qcrr1AH2XbXocc9OujTWByNsafAJIYyEY3j/hPE+OpkujWUURPNUTk2jfKJzFjLYcdjQK2qr9U4lLFB2PAElHTp0OPV+6XgZbluEIDyIQjrZpfHtBNXHd75wVcfhYceeFOx82LYXY2NJAZfk2emJsaAwuRt2RtQjI9QmYFW
wo97XanVHVnbySSnC7MgIA2XH7tLtTTbOL67SHScmR0eo01jNesqvFcRCQELaBhMGZYBPPmePDQhB/Uqs8VPKvsqy2rccKGmwc+mll+I73/kOrrrqKsPHHDlyBF/5ylfw2GOPwenUXmns3bsXTz/9NH72s59h0aJFWLJkCR5++GGsX78eR48eLfTmm4eXm8/7rOKlyAUeUBidnIMnWO5ZsGXdcyZvZccqg3IinrH1vqahYK6jIgCVZ8ekQTl4EjjBzK3oWixXNY1FYhBV2yynsXSDHVUay8wcsdHclDp5fwxLJ2Bdz47+onwqGMGRcfZZ6/CwgGjnwUHDPzWQZuAtP64isYS2q62eslMsz47EqZik7OTzdwVB5dtJMinHY8DBrQCAUMeHAQgYEiR1xyCVFU+I+qMJcoUHO7wtQ+scxGtaUCOEcVr4Lc1xy5Wd9yPssQFvlspOw3T2PTSk9SWpZrOpfVlqk7JcjWWRspPVyIgMoyIyprEAoGM+G4aaYXSEusO47sxEK1CvwWaDHdV8rIAwmr7svKZZ8R5WEGXt2UkkErjhhhvwjW98A3PmzEm5/+WXX0YgEMDChQvl25YtWwabzYatW7cavm44HMbw8LDmq2AMHgT2/pH9vOjm/F4rk7LDe+z4WllX4yzgJ6VYQkzfbM5w2yxQdgY+AB7oAv7yjbQPU2T+PEZFANkHaAdfZt9bZgK+ZlkNS4jaq9S+dMoOX1DFOBBPX9INIOe0pOzZ4cbLLDw7u48MISiVaXd62L7eeUjfpByLJ+TW8o1p0lhAUiCtCnaiFjSUywRLY2mDnf64J2Ubc8LIt3N8Nwuk3X6IbbPZQ5A+2FFX9llSjZUc7NhsciO8C4TX5TJ39fYfTrBAOavSc4B1lK6XRmioTcoqZcdht8njc/SUnXyPgbqclB3jhoKAts+OISZHR6jTWC2FUnZ4I1BXneku+k0+N06BXfydK7xddZVYQJkHO//xH/8Bh8OBr371q7r39/b2orVVe5JzOBxobGxEb2+v4euuXr0afr9f/poyJcvmftnw6k/ZCWf6R4G21IAtK9TTxfXIsXsyAHhcyqGQ23ysLFUSPd59hl0FvvOXtA+TO8zmMyoCyN6zo0phsb+vak+vWlx7jbonA4qyA5gzKQel+VbZKoLJSpdG2ZHSWAaenTePDmEUTPXw29mJwEjZ4RU9gqCvBKiv5DUm5TJQdk5EWbCTVxoLMO61w4+Xrg/D52HHwgAfuGhQkcVPrHabYE11WnKwA8A+YykA5tsZ5L12IkE5tXZEbIbPZc/t/8ErstS+HR7sSAG3bFJWVWSFLTIoy3129IId7scxUnZ0/DqAauhwpqBY9u0YBzvqpoKFU3ayMycDrLjg2fg5AIA7nb/HRcfXpSrPFdxjByjjYGf79u148MEHsW7dOvOTd01y9913Y2hoSP46dCj9XJuciQSBHb9kP3/4y/m/Xqamgjl2TwbYlS8f05STb8cKZecY6+yK4SOmDJz+fEZFANkrOwcUczLATki8bJmfpERRlLsnp8zFAtjYD0FaNM2YlLm3o6Yp/eOS8SU9XuPZ4SZOfWXnzSNDGBFZsOMD+z+8c3xE92qZp7D8XqduGa0gCLJqGDJQdviogEJOPXc5bOiDKtixOTAYYyceT54nWDmNldxrRzazny+fKPsTPNjRV3Z40Fyj1603F3SCHa5AzBX2Y3RAuiiUVJ2Yqw4jqMle1eHojY2Q01hsjVAqspTjwaqAt1YKJrJLY5lTdjJWx5kYHaFuKliwiefyqAiTPXYANPvcWBtfjodjVwIATtv9I+Dpu1jrCw4pO4Xh73//O/r6+tDV1QWHwwGHw4EDBw7gzjvvxLRp0wAA7e3t6Ovr0zwvFothYGAA7e3Gsr/b7UZ9fb3mqyC8/ht2Mm48DZjxifxfL5NnJ8fuyYD2pJTX5PN8gp2ju5SfeX8YHbiy02KX5NpcRkUAihoVDWo/1HqEhoHeN9jPkrIDKAsgXxAHx6Lywt2qp+wIQnbl51wB8Damf1wyZpSddGksSdlxREcxKeCFKAJv6JSgK92TjU+ONXoVWXKw0yjvL6fD2osaNS6HDVE4cEoIAABEj19udKhWNXNCL42VSChtCqZeoAp2pGPOMI1lYE4e6AGevV9J25pFDnYCym117fjANhU2QYTQs5ndJqlSoRp21Z51Q0GO2qTMCSueHUBRb3QNyoVqKgio0lhJnaN53x0Dz07GDsqcwBSg+Qym5PdsSbt9QCGHgGZXiQXwthECfhD7LL4d+zy7cetaYMNNyv6iYKcw3HDDDXjjjTewa9cu+auzsxPf+MY38MwzzwAAFi9ejMHBQWzfrhjCNm3ahEQigUWLcuhQbCWiCLyylv286JasJpAbktGzk2Wn3SQMy4TNkK9BORIE+lXDEpPLV1Xw7WuBFOzUtubWpFEt82ZKZR16lS1iDdM1H3a+AHLTNE9hNfpcxpJ80siItKiCgqyoSUp7aTw7BuW5AAbHIjg0MC6nsRAexdldAQDQ7beTruycozt3bUxRrJTSc4ua6OnAVaOTAguKRbcygLUgaawTb7NAw1kDdMyX/TfcF5FR2UkuO3/5x8AL/y+w/ZfZbZuesgPgTc8CAIDvkHRSllSpEQ+7SMw92NHptaPy7ABKxZWeQTnfYKcuJ4OyWWXHxHGSoZuy3Sagrd4NmwBMavBmfr1cyGJUBEf9+X3OfzVw9U8BmwPY/Tiw/jr2P6zwNJZF/chzY3R0FPv2KSe1np4e7Nq1C42Njejq6kJTk1aKdzqdaG9vx5lnsuZcs2bNwiWXXIIvfelLWLt2LaLRKFatWoVrr71Wt0y9qAgCcPWjwLafA2dfZ81rZvKYyMFODv4V5DkMNN8+O71vKmZawHh6MpQTQiPySGEBbPET7MwsHBkFPGkUvqQUFid5qrcy7TxNtUJSF+W05JzGSgp2TCo7bx5hAaQ/0AiMA4iN45xJPvz5DX3fTrqyc45XrlrTV3bCxTAoS699QmjC6ehBQtVZNu80Fld2Ro4x07ndqRwvk88FHC64RRE2ARgQc1R2hqQTzUiWVaY8nZIU7OyrOw8Y+wMaj7/ALsykQG3AyS6ULEljiSJbB5ODHR3PTnGVHYPSc6NqrCg3KJs4XX7oY0wRef855f0n8bPPn4uTwTBa6wpU0ZSDsuNx2uFz2RGMxJk5ed5F7Jj57Q3AvmeBX16uqIqk7GTPtm3b0N3dje7ubgDAHXfcge7ubtx7772mX+Oxxx7DzJkzsXTpUlx22WVYsmQJHn300UJtcnZMOge44sdZHXRpydRBOcfuyZz80lh5BjvHdml/T245r4KfEAIJKRDIpRILYAuRWUXqgOK/UMPTWDwAO57OnMzJKo3Fr8yzVHYcbm2reNUx6PcaKztvHmUB5GmTlGNocf1JTMIJ9B58F+KpA0wFGDwIDB3BwAg7caTrBpvJsxMtkkEZAPokk3LcxfaN3SbAac8zfeZrYeZWMaFc/XJz8rQlAFia2Ody4JSYwbNj1FCQf7a5Yd0sBsrO8YYFCItO1IT6gP535RRcvy
3HURGchmms9UU0qKTVI5nTWFaPi9D1o+Wo7IybNSgD7GLI5mSfD4PREWdN9uPCM3Ncs8yQQ7ADKB4ieQDojI8DK//Iekkd2aa0ViBlJ3suvPBCbZ+HDOzfvz/ltsbGRvz617+2cKvKmIwG5RzLlCUM5xiZIV/PDvfrNH6IBTrJwwRV8MCiPi4t5LmMiuC46pivKl0VWXQcOLKD/ZwU7PCTEg8Qe4ek7slplR2TXZTjUSAsqVfZprEApu7IZaipys5IiPUHUhthdx9hf2/25CZgvweIhTD7fy/Fix4AcQAPav/EFbXd+G/cktazkxJE9+1NMiizoLWgHZSlk2ivZFKOOdmJwOOw5W8EttkA/2R23A4eAgJTUyr3AJaaOhXlyo5+NZY8FytZReBX1RYFOz5fLbYmZuIf7LtZykVSdo4JUtl5tqMiOA4XU7oGDzB1p65dMxsLSG9Qtm5cROqxnauywz2CploUuGuBrg8D+/8OvPMUcP6qrLbfEnIwKAMs9X5wYAyntajKzqecB3zxaeD/XKUUwNRXXkNBoIw9O4QOrjQqRCKuLIg5BjtKGiuHPjv8gxUdY9uSLbwSS5rMnM6zw5Wdupi0kOeq7ADmlJ2+t9h8qppmduWqwuvkyo6Uxhoxk8YyqezwExUEdnWVLWrfjk6fnXhCTOn+/KYU7MydVA/M+xw7ATg8CMOFkOhE3OaWb4Ngw7TRnfit69uY7Bgy3AyPOtV36FXg55ew1GFnN1DbXpzScymQ2pQ4B2iYhsHpl7Jty9evw1EPBB34gCkxdhcwaYH8kM6AN2MaS26roO6xk0gon+0xa4Idv9eJvyfOYr+8v0lWdvioiJw9O4Di2+HqbFIayyMrfYVTdhKi9vUB5K7sRLPsaD3nSvb9xQfzb7KaCzkqO1ee3YlpTTW4KFl1ap0F3PhXlpKde41yYVthULBTSahTRcmK2NhJaQaOkLPSYUk1FpC9uhMdZ4ZOQAl2gn2sAkoHWeqPSieMfIIdM+m343vY9/a5KTl4ruzwcvjjQ2aCHZMjI/jVv8cP2HMQYdVeJpWy43Ha4JD6DKh9O0PjURw4yQKwuZ1+4PKHgH89Dvzrcaw+53nMDP8S3z77b/Jt+Oe/45StEbNsh3D59n809FnxNvsNR58H/ucKIDTIFs7r/wDYbMWZjSWdRF+PTwNuex0npi4HYGGwo67I4qrOpAXK/xpsn55SBzs6qrbcMFOtIoQGlWGwfByMWQyCnUCNE39PzGO/7H9BvmrfH2OPS+fBygivyOIXLEaeHY1B2RrPjtdpl1tojCQPA+Wz5QyVnTz77HC6P88KGYJ9zFhebHIwKAPAFy6Yjue/cZH+ANBAF/BPzwKf/rkFG1gaKNipJOSAQkxVBeTuyS25nRiRZ7AjXekDyD7Y6X2TBWq+VqDlTEWRMMh589lY3ogUDORqUAbMKTu9b7LvbXNT7kouPefKTrs/jWeHX2FmVHaU+VE5oe61o7rKEwRBmY+laiy4R1J1Jjd4U+YidetVZLXPxe2+/8D+RBt8Y0eAn1+sbR8g4XXZcYXtBVy44zb2nj+0FPj8/8rvK1JEgzIPrLih3JPnLCYZuSLroG4KCwDOmuTHAK/Gikd0A+ygXk8Xnp4GWPCSZvaSBvXEcx1l521xCgZtDdJxKAIOLw6GpJEB+Sg7yb12ItqTr1KNpVZ22PvO9xgQBEExKSd70jKOi8ijg7Lm77iAZfexn198KPt2AfmSo7JT7VCwU0mou+8mBxR5dE/m5DUMVBBUXZSzlG65ObljPnud5CvDJLiy4w5Lkn4u3ZM5mbpSA8DxdMFOUum55NlJW2khp7EyeHZy7bHDMVB2AHX5uXL1y/06Z03yI5lzutjJ8q2jQxpP19vhRnw6cj/Gm+Yw1WHdp1J6jHxs6A940PVfsIkxYO6ngevWa5TAYhiU3XZltEcsnkAoxoOdQig7L7Cfk4KduZP8CMGNcUiBpE4qa4x7dtQGZW70lR+knwJLITqu9JBJDnZqnAAE7HCcrbpxMk5JHbFzrsYCVL12pIuVFGVHMijrVGNZcQxwT1pKRRYPZuJJfXZ4H5l8++yomX0lU/aiQeD5B8w/zwrkYKdA/eMqFAp2KgmbzTjtkkf3ZI4n32GgmarFjOBqQOfZ7Luc8zdQdqTFxzluQRqLX/0YKTuiqAp2Usd9yMNAw3FE4wmcDEoGZb8FBmWViTcnuEJmc6RcteqVn795lKUN5+oEO5MbvGiudSEaF7FHepwoihgIRtAPP059dgMwdQkLGn91DfDWH9m+e+7bWH6UuZq3tX+W9e9waE+kxSw9B5iSFI5aHOxwz87RXawSR7ABU7S9vma01cLlsKX17egqO8nKgFnfDld1bI6UYNcvKXsvivM072FImnOWczUWoA12Eoks01j5/z+sVHYisQRi0gDbGmcWirkgAB//Nvt5+zqg/z3zz82XHMZFTAQo2Kk0jLoo59E9mVOyyefcnNxxNvveyOfr6HtAguEY3IjAHsmzzw6Q2bMzdJhVa9kcLMWWBDeSjkXiODEShigCTruAxnRXxqYNyvmmsaT94qpN8RrV65SfK+bk1GBHEAScPYWpAzsPspPoSDiGaJydCBobm4Hr/z9g5qfYlfPjK5k/5+/fBwD8Z/SzeLLjtpTmmqIoFtWzA7ATGD/GvVYrO7x6rmN+ShrBabdhVnudyreTWpHFq7FqNcpO0pw/s74ddQor6f/P55htis6Wb0vUT5FnSuWl7Pi72OclFmKGbX6cp1RjFUbZkcvPjZQdQ4Ny6gWKejBr1gNjp10AnHEpS9E/e392z82HHKuxqh0KdioNoy7KeXZPBgCv1DY/p6aCQG7l59EQcGIv+1lWdnRazkvEEyLCsQSaePfkXEdFcDIFaNyc3Hym7pVfjdwwLyZ3T26t88BmE1IeK2O2g3LeaSxJEdJZ9Orc3LPDruSHQ1H09LP/m14aC0j17ZySGgrWuOxMIXF6gM/8Eui+QWqZvxmAgL+dfjceiV+JkOrkxuHBElDYYMduE+Rp25FYQq7UscyzU9+peNaAlOaTnLmT/PkrO2bLzw38OoCi7PSE6iBKiuV4TWfK/TlhdyhVi727ldv5IFCnzrgIPh/NimAno7Jj3qAsq8h2IbdAbNn97Lh4+0ng4Nbsn58L5NnRhYKdSsMoVZRn92TAoPlbNuTSWPD4HiARYykX3qxKNjimenb4lVazIF1B5zoqgpPJs3NcWqwNJtbLTQUjcfSZaSgImFd2xvJUdiYtBFrnAGd9OuUuruwMSyeEPVLn5EkBr2HPHB7s7JI6Ket2T7Y7gMsfBj56FzvhffaX+GDa5wDoK4b8JAcUNo0FKMFUOJaQj3G3VcqO3QnUqTrLGgQ7Z03ypx0ZYcqzY2GwAwDBc78KNM1Af9clAJify54uWDcD/wzzeXKCTQ42lA7KqeMiLFF2jLoo56Ts5KkAts5kwT8AbLxHtwLPcuRqLAp21FCwU2nwaD05oMizezJghWcnh2Dn2E72nZuTASWNNX4qRerni0+bzYIUFpDZs6MuO9fBJ/eQiaHXT
Nk5YH5cRL5pLE898OWX2NVlEsmeHU1/HQPmTQ5AEIAjg+PoGw5hQBoCmtI9WRCAi+4GbnsdmH2Fkh7VUQwjsSIGO7wiK16ANBag+HYA1lhOB7WyIwZNKjv8Qob3WsrWs6MT7DjsNjkoODFtOfCVbehzs4qy5Eq8nOC+O56iVqVS06WxLFV2UoKdTMpO6ud23OzE83RceDe7wDm0lSk8hUQUSdkxgIKdSsPIs2NJGiuP2VhAbmmsZHMywORu/j6STMrc09DplD7Q+ZiTgcwBmlx2rq/s8H0WDMfRO8wW0czBDjcom1R2ck1jpaFe1UUZUMZEGKWwAHYSObONLaA7Dw1iYCzzxHNAlR7VCaKjkrKjTjMVCnX5ueVpLEApP2+dYxigntFWh2GBBZSjp46n3D+mNy6Cp7H4MZiLZ0cHru4MSalMnpbMy6/DaZIuWORgR6m+4ybkkEbZsTDYMRp0a8+g7NhT33fWPXb0qO8AFt/Kfn72ftYZvVBEggAk9YgMyhoo2Kk09AKKREKRui1IYxXVoCyXnZ+tvT25V4cEV3Y67BYFO+m2OTKm+IbaztJ9uk82KMfkNFbaSizAfOl5vspOGvh0aO7Z4WXnc9IEO4DKt3NwEANBk8GOCWWnkH4dDv8bkZiqGsuC6h+ZVsns+6GLjLfBYYOrnlXJ6QU7cgdljWdHepwc7OSv7ABKsDMoBa2DvOw8H78Oh39+eZWoKtjx6PTZsWpcBKBWdpKbCvJgJ6n0PG4ijZVPsAMA53+VpepP7gN2/E9+r5UOftEm2LStSggKdioOPSVifID5XgBLqrGK5tmJhdmcJECr7ADKlWGSb4crO61yGquAys6JvcxoW9NsGFTVqCZ695r27JjtoJxn6XkaeFPBkVAMo+FYRnMyp1tVkSUHOxmUgHTpUavGBJhBN42V70lMzaJ/Bq7+GXDRv6R9mL+JpZqjI6lBC1cS+Akb0RDroAxYHuzwxoFc2eFBT15l5xxeZMDRUXYKMQgUUAJ586XnxuMiDKfQZ4unHrjwLvbz86uVVJPVqFNY+c58qzIo2Kk09E7O/OqpppkZJXNEnmFUwGAnFk/IqQvZnOxtVEp3OcnzdST44tOqNijng+zZ0Vl8eApLZ0wER91BWQl2zCo7adJYiYTqZFVAZScUxZ4jQxBFoMPvQXNt+kCNKztvHB6SlazGNBPPgfSKIb+idxZZ2QlZ3WcHYEHsvM9knB3U1s6M+LZxrR8toZpVJqdNglIKy+5SPhMWeHYARdnh6t6pMQsaCnLqJytpI0DT50fpoMzeayyeQFzqZVNYz452EKjsF0vj2eHBpyVB8YIvMMUreAJ4qUBjJOTBv+TXSYaCnUpDb/K53D05v2m06dINpsjg2RmLxHD1mpdwwQObmEKT3DlZjUEai3cqboRFwU66AI2bk3U6J3N8qg7KfC5W2onngGpcRJo0VniIqUpAQdJYas/O7jT9dZL5UEst6twOjEfjeOUDdrJuyujZMe7MbWXJcSb0PDvF+LvJdE1igb03NghRVZ0zpgoGeXpU9uvUtilmfIs8O1zZGRyLar7nNSqCY7MphQaANtiRq7HY/8DqijxDz45K2fnT60dx5j1PYcPOw2mVHR6g5+XZ4didyhiJF34I/PnrwEBP/q+rhqfjyZycAgU7lYaeQVnunpzH2ASo01g5TD0HMgY7/8+f3mKKwEgY+/pG9c3JHHUXVvUJQfI0NCQG2Q35prHSeXbSjIng8BO5KCqVNJYoO9yc7Ko1nNmTD2rPzptpxkQkY7MJOFtSd7iSlWloZE0axTBahO7JHJeqCqggaSyTdHVNBgD4xREcHlA+K7zs3CaogjB1s1Ae7ISGUn0neowPsu/egO7d9V6jNJYFyg6gTWWlSWNpKvIsUPjMKDs/2fI+RBHY9PaJDMqOxcfJrMuVxpuv/RR4+BzgdyuBI9uteX2qxDKEgp1KQ0+JsKDsHFBVY+VsUDZOCT35xlGsf+2Q/PuJkbCxORlgU4MhMFlWdSXLZWV/QgoGrFJ24mFtlYQoatNYBiSXpNa5HcpVuRFmxkUUsBILUFIYI6GYakyEuVk63VMCmt9TSs+T4KmisXIxKMdVaSwrDcomcdexoMUhJPD2/sPy7UGVP0TgSievsqxtY6XngrS9ZuZj8TRZJoPyeAGUHSBJ2VEHO9o0Fj8G7DYBDguOA9mzY1B6Ho+O402pr9TBk0FTnh1LlB2AKdif+xWw8k/A6cuYevvWE8BPP8bmyr371/x68dCoCEMo2Kk0dD07PI2VZ7BToDTWoYEx3P0H1pzPIZUX9w+OAMffYg/QU3acHsXHo0plBSNxuBFBTUL6G1b12QG0QdrQYZZKsjmA5jMMn263CZry5dZM5mTAXAdluRIrj+7QaeAnhPFoHO+fYMeSmTQWAHR3abep0Zf+PXtVHXMTCe1CbuWYgEyo01hhufS8+MEOHG6EbEzd23/woHwzN9/X6JWd17ay1BA3q5vx7WRKY3lZkCqXno9ZWHoOKB4jwMCzk9B8tyrgrZW6g6calNlxGo8on7sDA2PplR3+P8nXoKxGEIDp/8DGq9z8IjDvWrbO7P878OvPAP+1GNj/Ym6vTaMiDKFgp9LQCyjkNFZ+wY66akbM5epCJxCLxhP46vqdGAnFcE5XAFd2M3Nmom8vkIiyq9XAVP3X4xVZKpPyWCRm3agIgOXRuZFSHUDyFJbBmAg16oUwY9k5kF0aq0DKTq1KfRJFVkGWdlK7irOTlJ3MfXaUk3copg2kw7JBufCVI+pgR0ljlWYJjLrZcXvk2FH5Nt3Kn9GkCxmfNNw1k28nGlKOr0x9dpI8O5ZUYwGZ01hRbbDjtqjnUabZWLaE0lRwcCwKkVdF6ik7Vnp29GifC1z9E9aAc/EqZiw+sRf43edzq9gig7IhFOxUGnoG5eQFMUfUJyV1WahpdOZ2Pfjse9h5cBB1HgcevLYbnVIwUHNSaiOvZ07m6JiUg+G4daMiOHq+neOZU1gc9UKY0a8DKB2UEzHjBmMFLDsHWAddn2q7zfh1OA0+F6Y3s5OXwyag3pP+qledKkpWDSMl8OxEYvGSprEAwC4FLaf6j8kXFkG5oaBOsMPTtXKwkyGNxcvVBRvg1v/fppSej0vKjtciZafRKNjRprHkURGWKTts/0ViCc1kda7cOJBAwCPIQZ0Yl/xPegZlq9NYRvgnAxd/F/jam2y/jfUDLz6U/euQZ8cQCnYqDR5QqKP+EWs8Ox7VCSenVJZbu20v7evHI8+zPjkPXD0PUxpr0FLHFpSGwTQpLA6XwVW9dsYiMbQIg+yXfFNYHL3UYIbOyWrUV+Lmgh1Vsy8jdaeADQU59armcWZTWBzu22nwuRR/iQE2myCf4JL9YFE5jVX4oMOt9uzELJ6Nle22+Nmx6wyfwlGpik9OY6lPrGqDMsDaSwCZlR2ewvIEUibNcxTPTgShaFwuTAj4LFJ26toBpxTkqE6+SgdlrUHZMmVHFSzyJo0ANMHM5XObpYBdhC1tU0Feem5hGisd3oBSsfXyj4HhY9k9n6qxDKFgp9JI
Vk9E0ZLuyQC72udXVzmZlFUptoFgBLf/dhdEEbj23Cn45DxWFs+Dnc7xd9hj9czJHHVFlkQwEkezIEm1eTRQ1KBnrDZRicVRK2IZy84B1jOFT8g2MikXOI0FKL4dIDtlB4BckZWp7JzDT+DJDStlZacYBmV1GivC/q6ls7GygCs7DRjB7sNMqeSVhlplR1V6DigBfibPTga/DqAdF8H9OnabgLpMBnuzCIKSilYF+B5Vnx1RFC337Nhtgny8qX074wnlfV05twlTm3xwQ6WspjMoF/M4mXU5MPlcdiH0/OrsnksGZUMo2Kk0kqeej59iZYyAJSd/vhDlFewkorjrd9vQNxLGh1p8uHf5bPkhLXUeOBHD1JjUXyKdstOoCnYkqX8sHEOz3GOnQMpOZExJnZkIdtRzjDJ2TwbYSSCTb6cYyo4nd2Xn4jntOK3Zh+XzOzM/GEpQkVyRZeUAyExoDcq8qWCJlkApPdkojMil/8HkOUzqCxk52MlS2UkX7EhpnFA0gePSXLeA15lRqcuK7hvY53iaMgWeKzsJEYglREtHRXC4ujOiGhnx17f7ERXZ3+ju8KCrsSYp2DEuPS94GkuNIAAf/zb7eef/AfreNv9cMigbQsFOpcFPzLEQEI8p5mRvoyX9WPIaBqqquHj1nYNwOWx4+LpzNAbe1jo3ZgiH4UIMorteKjE3oGEqK7WNjsnvMxiJq9JYeZadc5I9O317AYjsKtqEWlaTbRoLyDwygis7BfLsAIqy01LnNr/dEm31Hmz6+oW49aLTMz8Yqu7cBsFOUTw7knIQVqWxSlKNBchBbANG5KaOKQZlzYVMlp4dE8FOndshD189cJIpxZaVnXMW/TPw1R1AwzT5JnW6KhxLFGRkSK3OyIgNO48gDPb+hHgYU5tUwY5gYxVRScjBjlVql1mmLmb9eMQE8Ny/mX8eNyi7zbWRmEhQsFNpqOXJyKgl087V5DUfy+5EQqps8iGE//uyWZjdqf3QNde6MdfGVJ1427z0BmO7kwU8gOzbGYvEtAZlK0hWdo6b9+sA2qs+U9VYAODI0GtHTmMVpvQcUDw72aawcsFoZARPYxWzGisUiSMaZ0ph6YIdrbIjimJq6TlPYXkCyoVMtp6dNMePICjm8v39TGG0rKFgGtTpqnA0XhB1ry6psWDfSAhb3j0hBzuIScGOIAU7Do/uWjRuxdTzXFl6H7vYe+cv5kvReRrLRWmsZCjYqTTsLuUKJBJUBTvW+FfSDW00w7jATuIXTffi84tTS8q9LjvOcRwAAIw0Zk4RJVdkjUXiaLE62EkyVmfj1wEUZUcQkHG2lIys7JQujdXhZ9uwYGrhAiqOURBdikGgw6qr/VJ5dvj/tVEYxclgBL3DIVlFkA22elWWFnp2AKWnTsGUHR1sNkFR2VQVUwVRdqRg54+7jiIhAiJvMxELoavRBzeYciYaqOJyB+VSHCctZwDnfJ79vPEec80GyaBsCAU7lYYgaE3KFnVP5uSVxgIwDqZsXNDlNcz9z7czZedE7azMLyiblKVgJxxXPDtWpbF4TwpZ2ZFmYrWfZerpvIS7udZtfqBlui7Koljw0nMAuPmjp+H7n5mPL5w/rWB/g2PUnVseF2Ev/MlEDnbGFZ9GKWZjAZD/r20OdsztPjwkn5jltGhy2TmgSmNZE+xwdW+/HOwUXtkB1OXniYIoO7JnRwpsN+w8AgBwuXlDzzCaa13wO9nfjgv677topedGXHg3q2g7sp11Ws4ElZ4bQsFOJSIHOyOWdU/mpJtQbYagFOw0OQ1m98Sj+FBiPwDgoGdG5heUlR1WkRWMxAqo7Ixqx0RkmcYyZU7mpDMoR4KKV6OA1ViBGhc+vWBy5vEWFqB059b2byqFZ4f3lXE5bLDZCp8+00VOY7Fg580jQ3KZs2x4TzYnA0qwEx5WxhzoYVbZkYKdAyfHNL8XGvXkc2UYrJUGZamLcjiGd4+PYM/RYTjtAmp8UhFFLAxBEDClnv3NqJD6vkVRVDUVLLJnh1PXBpz/Ffbzs/+WeSYaVWMZQsFOJaJuLGhR92SO3No/x2GgIwmpj47DoFneibfhQhTDohf7EyZSb1zZObkPoigiFhlHvSAFCIXw7AwdUo2JONPU07l50VTZOSfdyAiewrK7NM3YKhmu7PATOqeYwY5bTmOxY7NkKSxADnZq4sOwIYHdR4bknjCpyo7qc+IJKGnsdOqOyWCHl5+fDEpDQE22EsgXdRdlvtZYeQzUqQzKf9jBVJ0Lz2yFgzf0lALFKfVS0IXU9x2OJRCXxpuUYmCszPmrWPryVA+wfZ3+Y4aPARvvA6JSSxLqoJwCBTuViHryuUXdkzmePIeBDsdZsFPvMLjqPPY6AOAtcRpOBA0CIjU82DnVg3AkioYEU3VEu4st/Fag9uzwFFbzmYDD3ML/kRnN6GqsMV2GDSC9Z0fdY8fKMuASYuTZUQaBFs+gPDzOAq6SlZ0DchAiQIQfo9h9ZFg2KMudrUd0gh1BUEzK6Xw7pj07zrS/FwpNGiteuDTW0HgUT0gprKu7Jynl5dJFxqRaqdVGIlW5UafyS5bGAlhK6sK72M+bHwBCw8p9fW8DT9wK/Ogs4MUfsdsmn6cogIRMibQ5Ii/U87G4smOVZyePNFYoGsdQwg3YgXrBINg5ugsAsDsxnU0+z4R/ClM44hGETh7Udk+2KhBQe3ZMTDpPZk6nH1u+eVF2fzOdZ6cIfp1iY2R8L8W4CJ7GKlklFsAqDT1+IDSEJtsI9o3WA2AqQk2yQTm5f5avmXn10lVkZanscCwbFZEBuTIuGpd7HhXCoLzp7T70DodQ73HgY7NagR3coMzWng4fW0OCOsEOT2G57DbzXrxCcc5K4JU1rCr1xR8BH1oKvPQQ8O7TymOmfBi44DbgjEuq5iLJSijYqUTkk3MBPTs5GJRPBiMYkzw7nj2/AU68nvqg9zcBYMHOKTPBjs3OenT0v4tI33ty2blg1agIQOvZybLsPGfSKTv8RFXASqxioxjfkzw7xeygbNcGXCVNYwEsmA0N4ayGOPadBPpHWSqpNrn0PLnS0kyvnfFB9j3LYMeyIaAZ4IFmOJZAuIDKzpFBdjHxyXmdLHWWpOy0Sda5kVjqsTAuj4oo8XECsOB42f3Ab68H/v4D9gUAEICZn2RBzpTzSrmFZQ8FO5UIV3aGjgB8rotFoxO8Bm39zXByNIxeUZLnj+4Eju7UfZwIATvF0+EzE+wAbEZW/7sQ+/dZPyoC0Hp2siw7zxnZoKyn7BS+x06xMeyzU8TZWMnKQanmYsnUNAEDH+Cshhg2qOIWxbPDKy2TjvVMvXbiUaW5XLbKTtGrseIF9exwrj5nEvvBoZSeA0CLl6lpw1E7EglRY1gvSffkdMz8FDBlEXBoK2B3A2dfByz+CtBsrrHnRIeCnUqEKxF8QKYnoEzSzpN8+uycHI3gJ7FPwVbfgVvON25yeNgxDYeecKHJbLDTyObrCAPvowVSebhVoyIAZX+O9imqismy85xJ10FZ7rFTPWksw9lYJeizw/GUquycI/1/z6jTVtj
4XA6WZuHHYkoaK0OvndCQ8rMnfcPI1GCnWNVYikG5MNVYyqltSqMXC3kvKYfWoNzgYn87JDpwfCQk954ClCGiZaHsACw19bnHgPeeAWZ8wroCjQkCBTuVCFd2+Pwmi7onA/mlsU6MhjGMWrzS/GncssRYUvWOhoEnnsXJYATReCJzPlwyKTsGe9DM+2FY1WMHUNKCPMjwtRR+IUlXei57dqonjeWRZ2OVrhorOVVW8pOYFOxM82oD3hq3XVFtbM5UdcYnBcFGyg4Pktx+wJ5+iU9WcorRQRnQGpQLoeyog52ruicrPb940YGk7NgTLNAMw4kDJ8c0wc54tITdk42obQG6ry/1VlQkVI1VifCT8wAPdqxL6XjzGAR6UvIcNNWmXzAba1zyTB7+nPRPYMGOe3i/alSEhWms5J4UhU5hAcoVZto0VvUEO0oay8izU7xqLI6nCKmztEjBbJszqPGT+lwObSVWstmUKztGnh3ZnBzIuAlqZcftsBUtAFSnsQpSjeVRBzuTlDv45473sZIUnrDoxMGT2gsPJY1FmkA1QMFOJcKVHa4KWKns5OnZAYCWDCMTbDYBzVJAZKoiq4nlpL3BQ+gQpEDAyjRW8hyZQpuTgQwG5cKPiig28nFVwkGgySfTkpaeA7Ky4wydwoda2DEoCNJ26XVPlp+XwbNjshIL0KatipXCAlR9dlQT6K08Bma01uHDpzXi+g93YXqzqldVkkFZDnbgxIGBoOY1ys6zQ+QFhayVSHKjOQtVjnw8O/1SsJNJ2QHYpO3jw2H0jYQAZBhEWdcBOLywxcYxR9jPbrM0jZUU7BTarwNkMChXX+m5kUG5FOMi5G0q9UmM/3/HTuKsSX7s6xuFz+VgKRejsnMgs2cni2BHrewUK4UFqDooqzw7VlbkuRw2rL9pceodDm3pOQ96wnDJXaQ5JR8VQVgKKTuVSPLcE4vKzoH8S88BoMmXeWwCV39MKTs2m2xS9vApxVZ6amw2Nn+GU4w0VroOymP8ZFU9yo5hn50SenasNMTmhCrYmStNnvdlKjsHMs/HyiLY8TjtsuKVbFYuJJo0Fp+NVYzquDTKzqEB/TSW10maQDVAwU4lkqzsWBnsuPS9FWbgfUKa6zIHO611bNExFewASidljtUGYu7bsTmB5jOsfW090hmUqzCNVWMwYLYUU885JW0qCGiCHV4txD8XhmXngBLsREb1lcEsgh1ACXKKquyo01ix4vVaMlR2RCcOpAQ7ZWhQJnKGQtZKJDntYlH3ZMC4rb8Z5DSWifk6LVJAdGI0+2AnJjjhsGpUBMdVC+A40GJ+TEReOA0MyrGwMn29ioIdo6nnPIXhLIFBuSyaCgLA2ADmTwlg7fXn4PRWVRsEQD+od9ezoDwRZepOYIr2/iyDnUCNE30jYTT4iq/shKJqZacYwY6xsjM4FsXQeFQO/mTPjpuCnWqAlJ1KpIDKjifHNFYiIWJASmM1ZzAoA0qw0zdstteOEuyMOxusb4fOlZ1imJMBY2WHV2IJNlY6XCUYpUflE11JlJ3yMCgjPATEo7hkbgdOb5VS1LJnR+ezLQjpfTs5Kjv+Io2KANRTzxMIx9gx4S6hsuN0s7SyuiJLDnYojVUVULBTiSQrOwVJY2UX7AyOR+UJwY0mlJ3WPJSdcXcBhtzxcv5i+HUA49lY46ruybbq+XiqPTuiKMq3F9WgbC+zNJbHz4JaQAlyOXpDQNXIvXasCHbY57VYoyIApew/HEsUV9mxazso8++1PnYBqa7IGqc0VlVRPavpRELdF8btV06cFpDrIFBedu73Ok35L+Q0VjYjIyQSNRaWnXPmXAk0TAdmfcr619bDqBqrCnvsANrKJ+7RAErcVLDUwY7NrgQkY6qeOaKYvvQcUPXayT/Y+eS8dnQ11uAfzijA58oApRorrvLsFMOgzIMdqc+O1G+nvpatqQd0lJ2SV+0RlkD6XCWiTmNZqOoAygkgEksgnhDl5n+Z6DfZUJAjp7FGQhBFUelwaoSvBUF44cM4nPUWNhTknPcl9lUsMik7VVR2DmgDi/FIHB4nm0UUk9TAYgQ7NpsAp11ANM7+ZlGUhEzUNLFARx3shIYyz7xL12sny2Dnqu7JuKp7sskNtgZ3qZSdFM8O++6vZ8quOo3FL/hI2akOyuDTTmSNOo1lYfdkQHsVk41JmZuTzfh1ACXYCUUTGA3HMjwaCEbi+CDB3quvybomiiWDL7qJKBvcyKnCUREAYLcJckDDTyLcnAwUJ9gBtOpOydNYgKYiS4arOh6/8cw7Cz07pUAzCLSk1Vjse6OfBTvqNFYwTGmsaoKCnUrEZgcckjJgYfdkQGsUzSaVdVIOdswpOzUuhzy/xkwqa//JIPYmpgIAPK1VMOWXp7EArbpTpWksQFF3eHpAnc4qRjUWoA2qyj7YSdcs1Mizk4gD44Ps54oIdspD2Wny1wMwMCjTuIiqgIKdSoX7dqycEQVAEAS5SiWbiqyTWVRicbLx7ezvH8P3Ytfiwbo7gbM+a/pvlC0ONwDpBK8OdvhVeZUpO0BqW4OIKtgpylU9tMFOyT07gPJ/VhuU5bLzdMGOgWcnNARAMoCbmI1VKtyq6rxCdFA2JGnqOf/e2sAqH48Nh+TqMEpjVRcU7FQq3LdjsWcHyK3XjtJjJ4tgp5b7dswpO/3w48Dky42l/UpCEPTLz6s0jQWkVvpFVSe5jJ4ti9AqO2Ww/OWq7HDPTnIaiwfLTp+SsilDuLKjTmEXJZXp0K/Gqq+rhc9lhygChwbYxQcZlKuLMvi0EznBfTsFDHbGslB2sjUoA0BLvXll54MTLJeuGepX6eiNjKjiNFZyD6diVmJxytazM65SdkbSdE/myMpOkkG5AlJYgBLsjIRiqtuKOS5Cq+wIDje6mtjaclDy7YxTGquqoGCnUpl7DdA0A5j2EctfukHqk9NvtgcO1J6d7JUdM7129p9kC9C0agx21MpOFY6K4Hid+gblogY7qhNqWaSxeFCrUXbSdE/myJ6dk9rbK8CcDCiBzUhIMecXxbfFu6MnKTtweNDVyD6PB06OQRRFBKnPTlVBwU6l8pE7gK9ss35GFIBJAfahP3xKZ+6OAfJcrGyUnSy6KO/vr2JlR8+gXGWl54ByhZzs2SmWORnQBlZlU3oO6Kex0qm2XNmJBoGIOljmwU7Ask0sBHzfS50H4HYUKZXJlZ14mPUz4v12HG5MlZSdAyfHEI4lwHtfUhqrOiiDTztRbkxuYF6SI4Pmgx2u7DRloeyY7aI8NB6VDdDVqeyogx3ppFfFaazkaqxiKjvuck1j6Xp20lzIuGqVbsBq306FKDvJ+75ox4DaxxSPJCk7bN07ODCmSeHXlMNxQuQNBTtECpMa2En4iEllZzwSR1BaHHJRdjJ5driq01LnlsvVq4Jkg3IiLlXToDrTWEmTz6PFrMKRqIxqLBMGZfV8LLVvp0Kq+ZJnoRXFrwMoyg4ARIKAKAU1DjemNqmDHZbCcjlscBTx+CQKB/0XiRTkNJZJZYd7e1wOW1bBiBLshNI+jvt1pjdVkaoDqJQd6f
2PD0IpGy7vK/NcSPHsyMpO8YIOHuzYbQKc5XAS48pOZJQdB/GoovLoDQFVo+fbqRBlJzXYKdL/wq66GAsNKj87PJjayA3KY6oeO2UQEBOWUEWXyYRVTM5S2ZF77PhcWeXdW+s88vNj8YThFVRPPzcn1+jeX7E4kgzK3Jzs9gP24g1lLBZGfXZKUY3lKeLfTIvHDwh2pjCMD0A2itgcmQOWdMpO2Qc79qTfi/T/EASm7sRCiooKAHY3OgOAwyYgEkvI1Z+UwqoeyuQTT5QTXNnpHw2b6rXTL6Whmuuy6+vR6HPBJrD1fUAKmPRQgp1qVXakoFLusVPeJ6pc8SSlsZRmcsU3KJeN6VQQtL4dnsLytWaeeq/Xa6dCgh2nXYD6uqiYAa/s2+HBjt0F2Fi6iqfw3+4dBlBGxwmRNxTsECkEapyyfHvURCrrZJA3FDTv1wFYKqHJRGNB7tk5rWqDHUnZqeIeOwBQ42RCcmoaq/ienaJ5RMygCXaksnMzM+98OsNAKyTYEQRBo+YUTdkBFGO3HOwoF2ncpPz2sREAgK+aPIITHAp2iBQEQZDVHTMVWUpDwew7trZmMCmLoljFyg43KEv7uEonnnO8Lu0YkqKOCZDgwU5ZdE/maIIdEw0FOXKwU3meHUAbcBZX2ZFMyqFh6Xdl3eImZVnZoTRW1VBGn3iinMimIivbiedqMlVknRqLYljqssoNhFVDcgflKh4VASgnjpIqO/YyS2MB2oosMw0FORXs2QG0ak5J0lhhHuwoFVp8jTkwwNRWMihXDyUNdrZs2YLly5ejs7MTgiDgiSeekO+LRqP41re+hbPOOgs+nw+dnZ34/Oc/j6NHj2peY2BgACtWrEB9fT0CgQBuvPFGjI6OFvmdVB/ZKDsnc2goyMnURbmnn/0vO/ye8jpBWUFy6XmVp7E8hsFO8f6v/ATrKds0lomyc/l5SZ4dUaysYMepTmMV8f8hKztSGkul7HRJyg73idOoiOqhpMFOMBjE/Pnz8cgjj6TcNzY2hh07duCee+7Bjh078Ic//AHvvPMOLr/8cs3jVqxYgT179mDjxo148sknsWXLFtx0003FegtVSzbKDvfs5KPs9A3rl5/39LNAoKo6J3P4QNMJk8YyMiiXIo1VBcFO8uTz8IjSN6YSgh11GquYbQCSDcpqZadJW/FZdRdYE5iShq2XXnopLr30Ut37/H4/Nm7cqLntxz/+Mc477zwcPHgQXV1d2Lt3L55++mm89tprWLhwIQDg4YcfxmWXXYbvf//76OzsLPh7qFay6bXTP5L9EFBOpi7K+6vVrwMYKztVWo1lXHpexGose5kHOyPZBDu8z06/VtVxeJQUaRmj9k0VdXRHOmWnURvsUBqreqgoz87Q0BAEQUAgEAAAvPzyywgEAnKgAwDLli2DzWbD1q1bDV8nHA5jeHhY80VoyabXjlKNlYuywxYeI89OT7U2FAR0Ss+rO40lKzvJwQ4ZlNn3XJWd2DjrBlxBKSygPJWdGpdDVpv570R1UEaf+PSEQiF861vfwnXXXYf6+noAQG9vL1pbtUY+h8OBxsZG9Pb2Gr7W6tWr4ff75a8pU6YUdNsrkUkBdoXTOxxCTEo36BFPiHKPnOa6HDw7GQzKPSeqWdlJCnaqeOI5oCg7Y8njIopoTl0yoxlTm2pw8ZwM3YmLCQ92glmWnrt8SmPKsf4KDHbKT9kBgKkqdYeUneqhIoKdaDSKz372sxBFEWvWrMn79e6++24MDQ3JX4cOHbJgK6uL1jo3nHYB8YSI42l64Jwai8iTixtrck9j6fXZEUVRGRVRlcFOUul5FU88BxRlh6exSjEIdE6nH5u/cRGWzy+jFDcPbgcPMJUGYE0FzaD27VRwsOOyF9OgLK1TBsFOVxMFO9VI2Qc7PNA5cOAANm7cKKs6ANDe3o6+vj7N42OxGAYGBtDebnzl5na7UV9fr/kitNhsAjr8mVNZvBKrocaZ08A8ruyMReIIhmOa+06MhDEWicMmpObSqwKHyqAsioqyU61pLKeRQXmCn1B4cMtLod31gMvk8a727VRcsKP830uj7KT22QG0LS7IoFw9lHWwwwOd9957D88++yyamrRXvIsXL8bg4CC2b98u37Zp0yYkEgksWrSo2JtbdSjl52OGj8mnxw7AOpTyq6fkVBZvJjipwVvcPhzFQm1QDg8DCSnYq/I01ng0DlEUZc+Os4gG5bIkWckz49fhqHvtyMFOwJLNKjTqAKcknp1wqmcH0FZkkbJTPZTUfTU6Oop9+/bJv/f09GDXrl1obGxER0cHPv3pT2PHjh148sknEY/HZR9OY2MjXC4XZs2ahUsuuQRf+tKXsHbtWkSjUaxatQrXXnstVWJZgJnycx7s5FKJxWmtc2P/yTH0jYQ13hy5c3I1mpMBrWeHp7CcNRVRSZMLfDZWQmSqTikMymWJy8dGFsSlYD+bYEfda6filJ1Se3b0lZ0pKhXZ6ySDcrVQ0lVm27Zt6O7uRnd3NwDgjjvuQHd3N+69914cOXIEf/zjH3H48GGcffbZ6OjokL9eeukl+TUee+wxzJw5E0uXLsVll12GJUuW4NFHHy3VW6oqzDQWPJnHqAiOkUmZV2JV3UwsjtqzU+UpLEDbej8UScgG5aLORSpH1MNAAXPdkznyyIh+YHyQ/VwxwU6Jq7EgmQ3TKDs+Nyk71UJJw9YLL7wQIm9VqUO6+ziNjY349a9/beVmERJc2TlsQtlpsSTY0TYWrOoeO4BqXMR41ffYAQCn3QanXUA0LmIsGivJuIiypaYJGJG6w9dlUSmmDnbCbHhl5QQ7JRoEmhTcJCs7TT4XfC47gpE4pbGqCFplCEMmB8wblLOdeK6mlffaGdX37FR9sBOPKPONqljZAVQjIyJxxaBMwY7Wp5WVsqPn2amQYKdk4yKSLsySgh9BEHDdeV2Y3VGPWR1UvFItUEKSMET27AyOQxRFCEKqkVRuKGiBstM3rAQ7iYSIAyelURHV7tkBgKEj7HuVlp1zvE47RkIxjEfjSun5RK/GApLSWDl6dmLswqNSgh1Pqaeey7+nrl3/+qnZRdoYoljQJRVhSIffC0Fg/VD6JQUnmRN5DAHl6A0DPTYcQjiWgMMmyN2cqw71ojt8mH2v0kosjrrXjlyNZZ/g1ViARZ6dSlZ2ingqsietVfbcL9SIyoGCHcIQl8MmN/0zMimfHLVO2VEblLlfp6uxJqf+PRWBICgmZa7sVHkaS+m1kyDPjhpNsJOjZ0c2uVdIsFM2yo5H/3FEVUGrDJGWSRl8O9yzY4VBWd1F+YNq9+tweCprmKexqjvY8ah67ZRiXETZkm8aKx5m3i+ggoKdUnl2MqexiOqDVhkiLZMbmPKg11gwGI7JQx3z7bMDMJUoLs2e2F/tPXY4fLbRBPHs8OqWsUhMNihP+NJzQAlyBXt2x4CrBnCqPiM2B+CqtXbbCoSmqWBRlZ30BmWiOqFVhkhLusaCXNXxOG15lWg2+lwQBNZsjg8V5cHO9JYqD3a4ssO7uU6QNJbas0MGZSjBTm0rYMtyWeapL
ICpOjqFBOWIZlxEmRmUieqDgh0iLekaC/YHlVERepVaZnHYbWjy8VQW67XDGwpWbSUWJ7lbchX32QGULsrjkTiNi1AzaSHQOgeYf132z00OdioEzSBQUnaIAkOl50Ra0jUW7B/J35zMaalzo380jBMjYcTiCRyUys6nNVfhAFA1zqT3V+VpLGU+Fo2L0OCpB778UubH6cF77QAVFuyQskMUD1pliLRMTqPsnJRSTs15NBTkqCuyjgyOI5YQ4XLY0Omv0rJzTrKyM0HSWONRaipoGTUVquyQZ4coIrTKEGnhys5IKIbhUFRzH1d2cp14roablE+MhlUDQGtgs1V5ikMd7NgcgLuudNtSBLxyGitGwY5VUBorO1KCnfwv1ojyh1YZIi01LgcaapwAUk3KXNnJpxKLo+6iXPXTztWogx1vY8WYS3OFKzuj4Rj46Ds3GZTzo0KDHY9TncYqn3ERRHVCwQ6REaOKrH4LGgpy1F2U5Uqsau+xA2iDnSr36wCKsjM4pqiEpOzkScV6dsplECgFOxMBWmWIjBhVZPFgJ59REZzWesWz08NnYk2IYEdlUK7yhoKAouwMjSvBDo2LyJNK9eyoOygX06ROBuUJCVVjERmZFOCNBZPSWPJcLAuVnRGlsWDVd08GktJYlXOiyhUe7HD/l01A9Y4DKRYVmsZqrXPjgtOb0ORzF9ebR2msCQkFO0RGjNJYcjWWRaXnANA7FEI4xroyTzxlp/rTWLzPDld2KIVlAZpgJ1CyzcgWm03AY//04eL/4eTBn6TsTAhopSEywtNYh1XKTiyewKkx6w3K49E4EiIbK8ArtKoa9VXlREpjSZ4d6rFjAZo0VvUfQ3ljd7DKRw4pOxMCWmmIjEzWUXYGxiIQRZaGaKjJP9ipdTvkEyHAKrHy6cpcMaiVnQlwouJjRUbCMQCAq5hVONWK0wPUdQCCDajvLPXWVAY8wBFs2sCHqFrov0xkhCs7/aNhhKJxeJx22a/T6HPBbkG+XRAEtNS5cXBgApmTgaRqrOoPdni5MS87d5E52RquWw8ETwB17aXeksrALl2g2d1V3+6BYJCyQ2QkUOOUr8iPSqksuezcZ12qqUWVtqr6MRGciVZ67tQqOeTZsYjOs4EZHy/1VlQOXNkhv86EgVYaIiOCICi+HSmVxZUdK/w6HLVHZ0I0FAQmXBqL99nhULBDlAQe5JBfZ8JAKw1hCrkiK0nZsaISi6NWdk5rmSjBzsQ0KHMo2CFKAik7Ew5aaQhTyI0FT/Fgx3plp6V2gis7EzGNRdVYRCkgZWfCQSsNYYpkZedkAZQd3kW5zuNAowWT1CsC2bMjAB5/STelGFAaiygLSNmZcNBKQ5giVdmxblQEZ6qk5sxqr58YZecA4GsFIAD+KYCt+suwnXZBU73nJGWHKAWk7Ew4qPScMMXkZGWHTzy3sBpr0fRGPHRdN+ZNqn6FQ6a+A/i/fjdhSoYFQYDXaceo1GenqAMgCYJDys6Eg4IdwhR8PlbvcAixeKIg1ViCIODy+ROwKdoZnyj1FhQVjyrYoTQWURIc0rpFwc6EgVYawhStdW447QLiCRG9wyGcKIBnh5gYeF3KskMGZaIkyMoOpbEmCrTSEKaw2QR0+Fkq653eEURiCQAU7BDZo67IImWHKAmyZ4fWr4kCrTSEabhv5/XDQwAAn8ueUl1DEJnwupTsORmUiZJAys6Eg1YawjS8IuuNw4MAgCZSdYgc8DpVaSxSdohSQAblCQetNIRpeK+d1w8NArDWnExMHCiNRZQcX4v0vbW020EUDarGIkzDlZ1TY1EA5NchckOd+nRTGosoBefeyNo9nHFJqbeEKBIU7BCm4coOx8qGgsTEwUPKDlFq3HXA/GtLvRVEEaGVhjDN5ECN5ncrGwoSEwdKYxEEUWxopSFM0+73QD3FgZQdIhdqVGksqsYiCKIY0EpDmMblsKGtTinVpGosIhdI2SEIotjQSkNkhdq3Q9VYRC54VMoOdVAmCKIY0EpDZAWvyAKAFlJ2iBwgZYcgiGJDKw2RFVplh4IdInvUwQ5NPScIohjQSkNkBVd27DYBAa+zxFtDVCJeMigTBFFkaKUhsoIrO40+F2w2IcOjCSIVSmMRBFFsqKkgkRULpzZgZnsdPnpmS6k3hahQvGRQJgiiyFCwQ2RFnceJp2//h1JvBlHBkLJDEESxoZWGIIiiQuMiCIIoNrTSEARRVDSDQCnYIQiiCNBKQxBEUVGnsagaiyCIYkArDUEQRUU9G4vSWARBFANaaQiCKCoazw4pOwRBFAGqxiIIoqh4nHZcdlY7RsNxNPpovhpBEIWHgh2CIIrOf61YUOpNIAhiAkEaMkEQBEEQVQ0FOwRBEARBVDUU7BAEQRAEUdVQsEMQBEEQRFVDwQ5BEARBEFUNBTsEQRAEQVQ1FOwQBEEQBFHVULBDEARBEERVU9JgZ8uWLVi+fDk6OzshCAKeeOIJzf2iKOLee+9FR0cHvF4vli1bhvfee0/zmIGBAaxYsQL19fUIBAK48cYbMTo6WsR3QRAEQRBEOVPSYCcYDGL+/Pl45JFHdO//3ve+h4ceeghr167F1q1b4fP5cPHFFyMUCsmPWbFiBfbs2YONGzfiySefxJYtW3DTTTcV6y0QBEEQBFHmCKIoiqXeCAAQBAEbNmzAlVdeCYCpOp2dnbjzzjvx9a9/HQAwNDSEtrY2rFu3Dtdeey327t2L2bNn47XXXsPChQsBAE8//TQuu+wyHD58GJ2dnab+9vDwMPx+P4aGhlBfX1+Q90cQBEEQhLWYPX+XrWenp6cHvb29WLZsmXyb3+/HokWL8PLLLwMAXn75ZQQCATnQAYBly5bBZrNh69athq8dDocxPDysDb4AFQAADwhJREFU+SIIgiAIojop22Cnt7cXANDW1qa5va2tTb6vt7cXra2tmvsdDgcaGxvlx+ixevVq+P1++WvKlCkWbz1BEARBEOVC2QY7heTuu+/G0NCQ/HXo0KFSbxJBEARBEAXCUeoNMKK9vR0AcPz4cXR0dMi3Hz9+HGeffbb8mL6+Ps3zYrEYBgYG5Ofr4Xa74Xa75d+5bYnSWQRBEARROfDzdib7cdkGO9OnT0d7ezuee+45ObgZHh7G1q1bccsttwAAFi9ejMHBQWzfvh0LFiwAAGzatAmJRAKLFi0y/bdGRkYAgNJZBEEQBFGBjIyMwO/3G95f0mBndHQU+/btk3/v6enBrl270NjYiK6uLtx+++34zne+gxkzZmD69Om455570NnZKVdszZo1C5dccgm+9KUvYe3atYhGo1i1ahWuvfZa05VYANDZ2YlDhw6hrq4OgiBk9R6Gh4cxZcoUHDp0iCq5TED7K3ton2UH7a/soP2VPbTPsqOQ+0sURYyMjGQ855c02Nm2bRsuuugi+fc77rgDALBy5UqsW7cO3/zmNxEMBnHTTTdhcHAQS5YswdNPPw2PxyM/57HHHsOqVauwdOlS2Gw2XHPNNXjooYey2g6bzYbJkyfn9V7q6+vpoM8C2l/ZQ/ssO2h/ZQftr+yhfZYdhdpf
6RQdTtn02alUqEdPdtD+yh7aZ9lB+ys7aH9lD+2z7CiH/TUhq7EIgiAIgpg4ULCTJ263G/fdd5+muoswhvZX9tA+yw7aX9lB+yt7aJ9lRznsL0pjEQRBEARR1ZCyQxAEQRBEVUPBDkEQBEEQVQ0FOwRBEARBVDUU7BAEQRAEUdVQsJMHjzzyCKZNmwaPx4NFixbh1VdfLfUmlQ1btmzB8uXL0dnZCUEQ8MQTT2juF0UR9957Lzo6OuD1erFs2TK89957pdnYMmD16tU499xzUVdXh9bWVlx55ZV45513NI8JhUK49dZb0dTUhNraWlxzzTU4fvx4iba4tKxZswbz5s2Tm5QtXrwYTz31lHw/7av0PPDAAxAEAbfffrt8G+0zLffffz8EQdB8zZw5U76f9lcqR44cwfXXX4+mpiZ4vV6cddZZ2LZtm3x/Kdd9CnZy5Le//S3uuOMO3HfffdixYwfmz5+Piy++OGUw6UQlGAxi/vz5eOSRR3Tv/973voeHHnoIa9euxdatW+Hz+XDxxRcjFAoVeUvLg82bN+PWW2/FK6+8go0bNyIajeITn/gEgsGg/Jivfe1r+NOf/oTHH38cmzdvxtGjR3H11VeXcKtLx+TJk/HAAw9g+/bt2LZtGz72sY/hiiuuwJ49ewDQvkrHa6+9hp/85CeYN2+e5nbaZ6nMmTMHx44dk79eeOEF+T7aX1pOnTqFCy64AE6nE0899RTeeust/OAHP0BDQ4P8mJKu+yKRE+edd5546623yr/H43Gxs7NTXL16dQm3qjwBIG7YsEH+PZFIiO3t7eJ//ud/yrcNDg6Kbrdb/M1vflOCLSw/+vr6RADi5s2bRVFk+8fpdIqPP/64/Ji9e/eKAMSXX365VJtZVjQ0NIg/+9nPaF+lYWRkRJwxY4a4ceNG8aMf/ah42223iaJIx5ce9913nzh//nzd+2h/pfKtb31LXLJkieH9pV73SdnJgUgkgu3bt2PZsmXybTabDcuWLcPLL79cwi2rDHp6etDb26vZf36/H4sWLaL9JzE0NAQAaGxsBABs374d0WhUs89mzpyJrq6uCb/P4vE41q9fj2AwiMWLF9O+SsOtt96KT37yk5p9A9DxZcR7772Hzs5OnHbaaVixYgUOHjwIgPaXHn/84x+xcOFCfOYzn0Frayu6u7vx05/+VL6/1Os+BTs50N/fj3g8jra2Ns3tbW1t6O3tLdFWVQ58H9H+0yeRSOD222/HBRdcgLlz5wJg+8zlciEQCGgeO5H32e7du1FbWwu3242bb74ZGzZswOzZs2lfGbB+/Xrs2LEDq1evTrmP9lkqixYtwrp16/D0009jzZo16OnpwUc+8hGMjIzQ/tLhgw8+wJo1azBjxgw888wzuOWWW/DVr34Vv/zlLwGUft0v6dRzgiBSufXWW/Hmm29q/AFEKmeeeSZ27dqFoaEh/P73v8fKlSuxefPmUm9WWXLo0CHcdttt2LhxIzweT6k3pyK49NJL5Z/nzZuHRYsWYerUqfjd734Hr9dbwi0rTxKJBBYuXIh///d/BwB0d3fjzTffxNq1a7Fy5coSbx0pOznR3NwMu92e4rw/fvw42tvbS7RVlQPfR7T/Ulm1ahWefPJJ/O1vf8PkyZPl29vb2xGJRDA4OKh5/ETeZy6XC6effjoWLFiA1atXY/78+XjwwQdpX+mwfft29PX14ZxzzoHD4YDD4cDmzZvx0EMPweFwoK2tjfZZBgKBAM444wzs27ePjjEdOjo6MHv2bM1ts2bNklN/pV73KdjJAZfLhQULFuC5556Tb0skEnjuueewePHiEm5ZZTB9+nS0t7dr9t/w8DC2bt06YfefKIpYtWoVNmzYgE2bNmH69Oma+xcsWACn06nZZ++88w4OHjw4YfdZMolEAuFwmPaVDkuXLsXu3buxa9cu+WvhwoVYsWKF/DPts/SMjo7i/fffR0dHBx1jOlxwwQUp7TLeffddTJ06FUAZrPsFt0BXKevXrxfdbre4bt068a233hJvuukmMRAIiL29vaXetLJgZGRE3Llzp7hz504RgPjDH/5Q3Llzp3jgwAFRFEXxgQceEAOBgPi///u/4htvvCFeccUV4vTp08Xx8fESb3lpuOWWW0S/3y8+//zz4rFjx+SvsbEx+TE333yz2NXVJW7atEnctm2buHjxYnHx4sUl3OrScdddd4mbN28We3p6xDfeeEO86667REEQxL/+9a+iKNK+MoO6GksUaZ8lc+edd4rPP/+82NPTI7744ovismXLxObmZrGvr08URdpfybz66quiw+EQv/vd74rvvfee+Nhjj4k1NTXir371K/kxpVz3KdjJg4cffljs6uoSXS6XeN5554mvvPJKqTepbPjb3/4mAkj5WrlypSiKrAzxnnvuEdva2kS32y0uXbpUfOedd0q70SVEb18BEH/xi1/IjxkfHxe//OUviw0NDWJNTY141VVXiceOHSvdRpeQL37xi+LUqVNFl8sltrS0iEuXLpUDHVGkfWWG5GCH9pmWz33uc2JHR4focrnESZMmiZ/73OfEffv2yffT/krlT3/6kzh37lzR7XaLM2fOFB999FHN/aVc9wVRFMXC60cEQRAEQRClgTw7BEEQBEFUNRTsEARBEARR1VCwQxAEQRBEVUPBDkEQBEEQVQ0FOwRBEARBVDUU7BAEQRAEUdVQsEMQBEEQRFVDwQ5BEBXL/v37IQgCdu3aVbC/8YUvfAFXXnllwV6fIIjCQ8EOQRAl4wtf+AIEQUj5uuSSS0w9f8qUKTh27Bjmzp1b4C0lCKKScZR6AwiCmNhccskl+MUvfqG5ze12m3qu3W6fsFOmCYIwDyk7BEGUFLfbjfb2ds1XQ0MDAEAQBKxZswaXXnopvF4vTjvtNPz+97+Xn5ucxjp16hRWrFiBlpYWeL1ezJgxQxNI7d69Gx/72Mfg9XrR1NSEm266CaOjo/L98Xgcd9xxBwKBAJqamvDNb34TyRN1EokEVq9ejenTp8Pr9WL+/PmabSIIovygYIcgiLLmnnvuwTXXXIPXX38dK1aswLXXXou9e/caPvatt97CU089hb1792LNmjVobm4GAASDQVx88cVoaGjAa6+9hscffxzPPvssVq1aJT//Bz/4AdatW4ef//zneOGFFzAwMIANGzZo/sbq1avxP//zP1i7di327NmDr33ta7j++uuxefPmwu0EgiDyoyjjRgmCIHRYuXKlaLfbRZ/Pp/n67ne/K4oimwZ/8803a56zaNEi8ZZbbhFFURR7enpEAOLOnTtFURTF5cuXi//4j/+o+7ceffRRsaGhQRwdHZVv+/Of/yzabDaxt7dXFEVR7OjoEL/3ve/J90ejUXHy5MniFVdcIYqiKIZCIbGmpkZ86aWXNK994403itddd13uO4IgiIJCnh2CIErKRRddhDVr1mhua2xslH9evHix5r7FixcbVl/dcsstuOaaa7Bjxw584hOfwJVXXonzzz8fALB3717Mnz8fPp9PfvwFF1yARCKBd955Bx6PB8eOHcOiRYvk+x0OBxY
uXCinsvbt24exsTF8/OMf1/zdSCSC7u7u7N88QRBFgYIdgiBKis/nw+mnn27Ja1166aU4cOAA/vKXv2Djxo1YunQpbr31Vnz/+9+35PW5v+fPf/4zJk2apLnPrKmaIIjiQ54dgiDKmldeeSXl91mzZhk+vqWlBStXrsSvfvUr/OhHP8Kjjz4KAJg1axZef/11BINB+bEvvvgibDYbzjzzTPj9fnR0dGDr1q3y/bFYDNu3b5d/nz17NtxuNw4ePIjTTz9d8zVlyhSr3jJBEBZDyg5BECUlHA6jt7dXc5vD4ZCNxY8//jgWLlyIJUuW4LHHHsOrr76K//7v/9Z9rXvvvRcLFizAnDlzEA6H8eSTT8qB0YoVK3Dfffdh5cqVuP/++3HixAl85StfwQ033IC2tjYAwG233YYHHngAM2bMwMyZM/HDH/4Qg4OD8uvX1dXh61//Or72ta8hkUhgyZIlGBoawosvvoj6+nqsXLmyAHuIIIh8oWCHIIiS8vTTT6Ojo0Nz25lnnom3334bAPBv//ZvWL9+Pb785S+jo6MDv/nNbzB79mzd13K5XLj77ruxf/9+eL1efOQjH8H69esBADU1NXjmmWdw22234dxzz0VNTQ2uueYa/PCHP5Sff+edd+LYsWNYuXIlbDYbvvjFL+Kqq67C0NCQ/Jhvf/vbaGlpwerVq/HBBx8gEAjgnHPOwb/8y79YvWsIgrAIQRSTmkgQBEGUCYIgYMOGDTSugSCIvCDPDkEQBEEQVQ0FOwRBEARBVDXk2SEIomyhLDtBEFZAyg5BEARBEFUNBTsEQRAEQVQ1FOwQBEEQBFHVULBDEARBEERVQ8EOQRAEQRBVDQU7BEEQBEFUNRTsEARBEARR1VCwQxAEQRBEVUPBDkEQBEEQVc3/Dw0yGaOvNvpIAAAAAElFTkSuQmCC", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# Compare Score RL & KNN\n", "plt.plot(range(1, episodes + 1), rl_scores, label='Reinforcement Learning')\n", "plt.plot(range(1, episodes + 1), knn_scores, label='KNN')\n", "plt.xlabel('Episode')\n", "plt.ylabel('Score')\n", "plt.title('Reinforcement Learning vs KNN (Score)')\n", "plt.legend()\n", "plt.show()\n" ] }, { "cell_type": "code", "execution_count": 1038, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "AYM5wBQ58fxr", "outputId": "88cad5d6-a3b6-43af-9f85-7473f072b37d" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Average Reward RL: 3.90\n", "Average Reward KNN: 3.78\n", "\n", "Average Score RL: 153.43\n", "Average Score KNN: 146.42\n" ] } ], "source": [ "# Average Rewards\n", "avg_reward_rl = np.mean(rl_rewards)\n", "print(f'Average Reward RL: {avg_reward_rl:.2f}')\n", "avg_reward_knn = np.mean(knn_rewards)\n", "print(f'Average Reward KNN: {avg_reward_knn:.2f}\\n')\n", "\n", "# Average Scores\n", "avg_score_rl = np.mean(rl_scores)\n", "print(f'Average Score RL: {avg_score_rl:.2f}')\n", "avg_score_knn = np.mean(knn_scores)\n", "print(f'Average Score KNN: {avg_score_knn:.2f}')" ] }, { "cell_type": "markdown", "metadata": { "id": "Heyo5s87yReP" }, "source": [ "# 6. Save Model" ] }, { "cell_type": "code", "execution_count": 211, "metadata": { "id": "wkNtOOwEyReP" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/opt/homebrew/lib/python3.11/site-packages/stable_baselines3/common/vec_env/patch_gym.py:49: UserWarning: You provided an OpenAI Gym environment. We strongly recommend transitioning to Gymnasium environments. Stable-Baselines3 is automatically wrapping your environments in a compatibility layer, which could potentially cause issues.\n", " warnings.warn(\n", "/opt/homebrew/lib/python3.11/site-packages/stable_baselines3/common/evaluation.py:67: UserWarning: Evaluation environment is not wrapped with a ``Monitor`` wrapper. This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. Consider wrapping environment first with ``Monitor`` wrapper.\n", " warnings.warn(\n", "/opt/homebrew/lib/python3.11/site-packages/stable_baselines3/common/vec_env/base_vec_env.py:243: UserWarning: You tried to call render() but no `render_mode` was passed to the env constructor.\n", " warnings.warn(\"You tried to call render() but no `render_mode` was passed to the env constructor.\")\n" ] }, { "data": { "text/plain": [ "(193.53333333333333, 98.98307374944916)" ] }, "execution_count": 211, "metadata": {}, "output_type": "execute_result" } ], "source": [ "model.save('PPO')\n", "evaluate_policy(model, env, n_eval_episodes=60, render=True)" ] } ], "metadata": { "colab": { "provenance": [] }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.7" } }, "nbformat": 4, "nbformat_minor": 0 }