import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from tensorflow.keras import layers, models, optimizers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy, SparseCategoricalCrossentropy
from sklearn.utils import class_weight
import random
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

def load_data():
    """Generate a synthetic labelled image dataset and split it 80/20.

    Returns:
        X_train: float array of shape (800, 10, 10, 1)
        X_test:  float array of shape (200, 10, 10, 1)
        y_train: int array of shape (800,), labels in {0, 1}
        y_test:  int array of shape (200,), labels in {0, 1}

    Fixes the original defect: features were generated as (1000, 10) and the
    (800, 10) training matrix was then reshaped to (-1, 10, 10, 1), collapsing
    800 samples into 80 while leaving 800 labels — a sample/label mismatch.
    Here samples are generated directly in image shape so X and y stay aligned.
    """
    rng = np.random.default_rng(42)  # fixed seed, mirroring random_state=42
    n_samples, split = 1000, 800
    X = rng.random((n_samples, 10, 10, 1))
    y = rng.integers(0, 2, size=n_samples)
    # Shuffled index split (replaces sklearn's train_test_split; same contract
    # for purely synthetic data: a seeded random 80/20 partition).
    order = rng.permutation(n_samples)
    train_idx, test_idx = order[:split], order[split:]
    return X[train_idx], X[test_idx], y[train_idx], y[test_idx]

def compute_weights(y_train):
    """Return 'balanced' class weights keyed by the actual class label.

    Uses the same formula as sklearn's compute_class_weight('balanced', ...):
    weight_c = n_samples / (n_classes * count_c).

    Fixes the original defect: ``dict(enumerate(class_weights))`` keyed the
    dict by enumeration index rather than class label, so for non-contiguous
    labels (e.g. {2, 5}) the Keras ``class_weight`` mapping would be wrong.
    For labels 0..k-1 (as used in this file) the result is unchanged.

    Args:
        y_train: 1-D array-like of class labels.

    Returns:
        dict mapping class label -> float weight.
    """
    y = np.asarray(y_train)
    classes, counts = np.unique(y, return_counts=True)
    weights = y.size / (classes.size * counts)
    return {c.item(): float(w) for c, w in zip(classes, weights)}

def build_cnn(input_shape, num_classes):
    """Build a small softmax image classifier.

    Architecture: Conv(32) -> MaxPool -> Conv(64) -> Flatten -> Dense(64)
    -> Dense(num_classes, softmax).

    Args:
        input_shape: shape of one input sample, e.g. (10, 10, 1).
        num_classes: number of output classes.

    Returns:
        An uncompiled keras Sequential model.
    """
    network = models.Sequential()
    network.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
    network.add(layers.MaxPooling2D((2, 2)))
    network.add(layers.Conv2D(64, (3, 3), activation='relu'))
    network.add(layers.Flatten())
    network.add(layers.Dense(64, activation='relu'))
    network.add(layers.Dense(num_classes, activation='softmax'))
    return network

class ICWGANInverter(tf.keras.Model):
    """Conditional GAN container bundling a generator and a discriminator.

    NOTE(review): no custom ``call``/``train_step`` is defined, so this
    wrapper itself cannot be trained or used for prediction; the sub-models
    (``self.generator`` / ``self.discriminator``) must be used directly.
    """

    def __init__(self, input_shape, num_classes):
        super(ICWGANInverter, self).__init__()
        self.generator = self.build_generator(input_shape, num_classes)
        # Fix: forward the actual data shape instead of letting the
        # discriminator silently hard-code a 10-feature input.
        self.discriminator = self.build_discriminator(num_classes, input_shape)

    def build_generator(self, input_shape, num_classes):
        """Map (noise, one-hot condition) -> a sample of shape ``input_shape``.

        Assumes ``input_shape`` is 1-D (e.g. ``(10,)``) — the Concatenate of
        the noise and condition tensors requires matching ranks; TODO confirm
        for multi-dimensional shapes.
        """
        noise_input = layers.Input(shape=input_shape)
        condition_input = layers.Input(shape=(num_classes,))
        x = layers.Concatenate()([noise_input, condition_input])
        x = layers.Dense(128, activation='relu')(x)
        x = layers.Dense(np.prod(input_shape), activation='sigmoid')(x)
        x = layers.Reshape(input_shape)(x)
        return models.Model([noise_input, condition_input], x)

    def build_discriminator(self, num_classes, input_shape=(10,)):
        """Two-headed discriminator: real/fake validity and class label.

        ``input_shape`` defaults to the previously hard-coded ``(10,)`` so
        existing callers keep their behavior.
        """
        input_data = layers.Input(shape=input_shape)
        x = layers.Dense(40, activation='relu')(input_data)
        x = layers.Dense(20, activation='relu')(x)
        x = layers.Dense(10, activation='relu')(x)
        validity = layers.Dense(1, activation='sigmoid')(x)      # real/fake score
        label = layers.Dense(num_classes, activation='softmax')(x)  # class probabilities
        return models.Model(input_data, [validity, label])

    def compile(self, d_optimizer, g_optimizer, d_loss_fn, g_loss_fn):
        """Store per-network optimizers and losses for a custom train loop."""
        super(ICWGANInverter, self).compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.d_loss_fn = d_loss_fn
        self.g_loss_fn = g_loss_fn

def genetic_algorithm(population, fitness_func, num_generations, mutation_rate):
    """Evolve ``population`` and return the best (individual, fitness) pair.

    Args:
        population: list of individuals (sequences of genes).
        fitness_func: callable individual -> numeric fitness (higher is better).
        num_generations: number of selection/crossover/mutation rounds.
        mutation_rate: per-gene flip probability passed to ``mutate``.

    Fixes two defects in the original:
    * the returned fitness list was computed for the generation *before* the
      final crossover/mutation, then zipped against the freshly mutated
      population — the reported score did not belong to the returned
      individual;
    * with ``num_generations == 0`` the ``fitness`` name was never bound and
      the return line raised NameError.
    """
    for generation in range(num_generations):
        print("Generation :", generation)

        fitness = [fitness_func(individual) for individual in population]
        parents = selection(population, fitness)
        next_generation = crossover(parents)
        next_generation = mutate(next_generation, mutation_rate)
        population = next_generation

    # Re-score the final population so the returned pair is consistent.
    final_fitness = [fitness_func(individual) for individual in population]
    return max(zip(population, final_fitness), key=lambda pair: pair[1])

def selection(population, fitness):
    """Tournament selection: fill each parent slot with the fittest of a
    random 3-individual tournament.

    Args:
        population: list of individuals.
        fitness: fitness value per individual, aligned with ``population``.

    Returns:
        A list the same length as ``population``; individuals may repeat.
    """
    tournament_size = 3
    scored = list(zip(population, fitness))
    winners = []
    for _ in population:
        contenders = random.sample(scored, tournament_size)
        champion = max(contenders, key=lambda entry: entry[1])
        winners.append(champion[0])
    return winners

def crossover(parents):
    """Single-point crossover over consecutive parent pairs.

    Args:
        parents: list of equal-length gene sequences.

    Returns:
        A list of children the same length as ``parents``.

    Fixes three defects in the original:
    * ``randint(1, len)`` could place the cut at the very end, producing
      children identical to their parents (no mixing); the cut is now
      interior, so both segments are non-empty;
    * an odd-sized population produced one extra child per call, silently
      growing the population each generation — output is trimmed to the
      input size;
    * an empty input crashed on ``parents[0]``.
    """
    if not parents:
        return []
    gene_count = len(parents[0])
    # Interior cut keeps genes from both parents (only possible with >= 2 genes).
    cut = random.randint(1, gene_count - 1) if gene_count > 1 else 1
    children = []
    for i in range(0, len(parents), 2):
        parent1 = parents[i]
        parent2 = parents[min(i + 1, len(parents) - 1)]  # odd tail pairs with itself
        children.append(parent1[:cut] + parent2[cut:])
        children.append(parent2[:cut] + parent1[cut:])
    return children[:len(parents)]  # preserve population size

def mutate(population, mutation_rate):
    """Flip each binary gene independently with probability ``mutation_rate``.

    Args:
        population: list of individuals, each a sequence of 0/1 genes.
        mutation_rate: per-gene flip probability in [0, 1].

    Returns:
        A new list of mutated individuals; the input is not modified.
    """
    return [
        [(1 - gene) if random.random() < mutation_rate else gene
         for gene in individual]
        for individual in population
    ]

def evaluate_model(model, X_test, y_test):
    """Print accuracy, classification report and confusion matrix for ``model``.

    Args:
        model: fitted estimator exposing ``predict(X)``.
        X_test: evaluation inputs.
        y_test: integer ground-truth labels.
    """
    raw = model.predict(X_test)
    # A 2-D output with several columns is treated as per-class probabilities;
    # anything else as a single binary score to be thresholded at 0.5.
    if len(raw.shape) > 1 and raw.shape[1] > 1:
        y_pred = np.argmax(raw, axis=1)
    else:
        y_pred = np.round(raw).astype(int)

    print("Accuracy:", accuracy_score(y_test, y_pred))
    print("Classification Report:\n", classification_report(y_test, y_pred))
    print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))

def main():
    """Driver: trains the GAN sub-models and a CNN on synthetic data.

    NOTE(review): several calls below look inconsistent with the shapes and
    APIs involved — flagged inline; confirm before relying on this script.
    """
    X_train, X_test, y_train, y_test = load_data()
    class_weight_dict = compute_weights(y_train)

    # NOTE(review): load_data() reshapes X to (n, 10, 10, 1) images, but the
    # generator/discriminator are built for flat 10-feature vectors
    # (input_shape=(10,)) — these fit() calls appear shape-incompatible.
    # load_data()'s reshape also leaves X_train with fewer samples than
    # y_train has labels; confirm.
    model = ICWGANInverter(input_shape=(10,), num_classes=2)
    optimizer = Adam(learning_rate=0.0002, beta_1=0.5)  
    model.generator.compile(optimizer=optimizer, loss='binary_crossentropy')
    model.discriminator.compile(optimizer=optimizer, loss=[BinaryCrossentropy(), SparseCategoricalCrossentropy()])

    # NOTE(review): sample_weight expects a per-sample array, not a
    # class->weight dict (that is what the class_weight kwarg is for); also
    # the generator's condition input expects one-hot vectors of width
    # num_classes, while y_train is a flat integer vector. Likewise the
    # discriminator targets [y_train, X_train] feed images to the softmax
    # label head — confirm all three.
    model.generator.fit([X_train, y_train], X_train, epochs=10, batch_size=32, sample_weight=class_weight_dict)
    model.discriminator.fit(X_train, [y_train, X_train], epochs=10, batch_size=32, class_weight=class_weight_dict)

    # NOTE(review): ICWGANInverter defines no call()/predict path, so
    # evaluate_model's model.predict(...) will fail on the wrapper; the
    # discriminator's label head is presumably what should be evaluated.
    evaluate_model(model, X_test, y_test)

    # CNN baseline on the same synthetic data (expects (10, 10, 1) inputs).
    input_shape=(10, 10, 1)  
    num_classes=2
    cnn_model = build_cnn(input_shape, num_classes)
    
    optimizer = Adam(learning_rate=0.0002, beta_1=0.5) 
    cnn_model.compile(optimizer=optimizer,
                     loss='sparse_categorical_crossentropy',
                     metrics=['accuracy'])

    cnn_model.fit(X_train, y_train, epochs=10, batch_size=32, class_weight=class_weight_dict)
    evaluate_model(cnn_model, X_test, y_test)

# Script entry point: run the full training/evaluation pipeline.
if __name__ == "__main__":
    main()
