main.py
import pandas as pd
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from torch.utils.data import DataLoader
from datasets import load_dataset
from transformers import AutoTokenizer, BertModel
import random
from sklearn.metrics import roc_curve
from sklearn import metrics
import matplotlib.pyplot as plt

# Project modules providing the tokenization helpers and the train/eval loop.
from multitok import *
from BERT import *
from multitok_BERT import *
from multitok_freq import *
from model import *
# Choose one dataset below; IMDB is active by default.

# SST-2 (GLUE sentiment classification)
# dataset = load_dataset("glue", 'sst2')
# train_sentences = dataset['train']['sentence']
# train_labels = dataset['train']['label']
# test_sentences = dataset['validation']['sentence']
# test_labels = dataset['validation']['label']

# IMDB movie-review sentiment classification
dataset = load_dataset("imdb")
train_sentences = dataset['train']['text']
train_labels = dataset['train']['label']
test_sentences = dataset['test']['text']
test_labels = dataset['test']['label']

# SMS Spam detection (single split, divided manually into train/test)
# dataset = load_dataset("sms_spam")
# train_sentences = dataset['train']['sms'][0:5000]
# train_labels = dataset['train']['label'][0:5000]
# test_sentences = dataset['train']['sms'][5000:]
# test_labels = dataset['train']['label'][5000:]
# Each experiment builds train/test tensors with a different tokenization
# scheme, then trains and evaluates the same classifier via train_eval.

# Baseline: randomly assigned token IDs.
X1, Y, loader, test_X1, test_Y, vocab_size = random_tokens()
train_eval(X1, Y, loader, test_X1, test_Y, vocab_size, 1, 30)

# Standard BERT tokenizer.
X1, Y, loader, test_X1, test_Y, vocab_size = bert_tokens(train_sentences, train_labels, test_sentences, test_labels)
train_eval(X1, Y, loader, test_X1, test_Y, vocab_size, 1, 30)

# MultiTok tokenization.
X1, Y, loader, test_X1, test_Y, vocab_size = multitok_tokens(train_sentences, train_labels, test_sentences, test_labels, 2, 1)
train_eval(X1, Y, loader, test_X1, test_Y, vocab_size, 2, 15)

# BERT tokenizer combined with MultiTok.
X1, Y, loader, test_X1, test_Y, vocab_size = bert_multitok_tokens(train_sentences, train_labels, test_sentences, test_labels, 2, 1)
train_eval(X1, Y, loader, test_X1, test_Y, vocab_size, 1, 15)

# Frequency-based MultiTok variant.
X1, Y, loader, test_X1, test_Y, vocab_size = multitok_freq_tokens(train_sentences, train_labels, test_sentences, test_labels, 2, 1)
train_eval(X1, Y, loader, test_X1, test_Y, vocab_size, 1, 10)
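
# --- Illustrative sketch, not part of the original experiments above. ---
# The roc_curve / metrics / matplotlib imports at the top suggest ROC analysis;
# the helper below is a hypothetical example of how they could be used. It
# assumes per-example prediction scores and binary labels are available (for
# instance, if train_eval were extended to return them; that is an assumption,
# not the actual behavior of train_eval).
def plot_roc(y_true, y_score, title="ROC curve"):
    # Compute false/true positive rates across score thresholds.
    fpr, tpr, _ = roc_curve(y_true, y_score)
    auc_value = metrics.auc(fpr, tpr)
    # Plot the curve together with the chance diagonal.
    plt.plot(fpr, tpr, label=f"AUC = {auc_value:.3f}")
    plt.plot([0, 1], [0, 1], linestyle="--", label="chance")
    plt.xlabel("False positive rate")
    plt.ylabel("True positive rate")
    plt.title(title)
    plt.legend()
    plt.show()

# Hypothetical usage, assuming test-set scores (test_scores) were collected:
# plot_roc(test_Y, test_scores, title="IMDB, BERT tokens")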