import pandas as pd
import nltk
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn import svm
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix, make_scorer, precision_recall_fscore_support 
from sklearn.model_selection import StratifiedKFold, cross_val_score, KFold, fit_grid_point
from sklearn.model_selection import ParameterGrid
#seed={555,666,777}
np.random.seed(555)

#train 70%, dev 10%, test 20%
# svm-BOW,LIWC,Topics,All_Features

# Load the pre-made train/dev/test splits and pull out text + gold labels.
train = pd.read_csv("../yida_mu/tr1.csv")
dev = pd.read_csv("../yida_mu/de1.csv")
test = pd.read_csv("../yida_mu/te1.csv")

xtrain, ytrain = train['text'], train['label']
xdev, ydev = dev['text'], dev['label']
xtest, ytest = test['text'], test['label']
#xtrain.shape, xdev.shape, xtest.shape


def ALL_SVM(xtrain, ytrain, xdev, ydev, xtest, ytest, params_comb=None):
    """Grid-search a TF-IDF + SVC text classifier on dev, then evaluate on test.

    For each parameter combination, a TfidfVectorizer/SVC pair is fit on the
    training split and scored with macro-F1 on the dev split.  The best
    combination is then refit on the training split and its precision/recall/F1
    and full classification report are printed for the test split.

    Parameters
    ----------
    xtrain, xdev, xtest : iterable of str
        Raw document text for each split.
    ytrain, ydev, ytest : iterable
        Gold labels for each split.
    params_comb : list of dict, optional
        Parameter combinations to try; each dict must contain the keys
        'kernel', 'C' and 'ngrams_range'.  When None (default), a built-in
        grid of rbf kernels, C values and n-gram ranges is expanded.
        (The original code ignored this argument and always rebuilt the grid.)
    """
    if params_comb is None:
        p_grid = {"kernel": ['rbf'],
                  "C": [10, 100, 1e3, 1e4, 1e5],
                  "ngrams_range": [(1, 1), (1, 2), (1, 3), (1, 4)]}
        params_comb = list(ParameterGrid(p_grid))

    results = []
    for p in params_comb:
        vectorizer = TfidfVectorizer(ngram_range=p['ngrams_range'], analyzer='word',
                                     max_features=20000, min_df=5, max_df=0.4,
                                     lowercase=False)
        x_train = vectorizer.fit_transform(xtrain)
        x_dev = vectorizer.transform(xdev)
        clf = SVC(kernel=p['kernel'], C=p['C'], random_state=555)
        clf.fit(x_train, ytrain)
        preds = clf.predict(x_dev)

        # BUG FIX: 'ngrams_range' was missing from this dict, which made the
        # final refit below fail with a KeyError when reading it back.
        score_with_params = {"f1_macro": f1_score(ydev, preds, average='macro'),
                             "kernel": p['kernel'],
                             "C": p['C'],
                             "ngrams_range": p['ngrams_range']}
        print(score_with_params)
        results.append(score_with_params)
        print('-----------------')
        print('\n\n')

    # Pick the combination with the highest dev macro-F1 (first one on ties,
    # matching the original index-of-max behaviour).
    best = max(results, key=lambda r: r["f1_macro"])
    print(best)

    # Refit the winning configuration on train and evaluate once on test.
    vectorizer = TfidfVectorizer(ngram_range=best['ngrams_range'], analyzer='word',
                                 max_features=20000, min_df=5, max_df=0.4,
                                 lowercase=False)
    svc = SVC(kernel=best['kernel'], C=best['C'], random_state=555)
    x_tr = vectorizer.fit_transform(xtrain)
    x_te = vectorizer.transform(xtest)
    svc.fit(x_tr, ytrain)
    ypreds = svc.predict(x_te)

    print(precision_recall_fscore_support(ytest, ypreds, average='macro'))
    print(classification_report(ytest, ypreds))

ALL_SVM(xtrain, ytrain, xdev, ydev, xtest, ytest, params_comb)