-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcodec.cpp
105 lines (79 loc) · 3.18 KB
/
codec.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
//
// Created by ROHAN MUKHERJEE on 3/9/20.
//
#include "codec.h"
// Build the codec: record the batch geometry and load the host-side database.
// The GPU/CPU managers are created later (via set_gpu_user()), so both member
// pointers are explicitly nulled here — set_gpu_user()'s null-check would
// otherwise read an uninitialized pointer on the first call if the header
// does not default-initialize the members.
codec::codec( int data_size_per_batch, int dimension, int num_jsons){
    this->data_size_per_batch = data_size_per_batch;
    this->dimension = dimension;
    gpu_user = nullptr;
    cpu_user = nullptr;
    // A database_reader previously parsed the JSON files here; host_database
    // now owns loading and organization of the batches.
    host_db = new host_database(num_jsons, data_size_per_batch, dimension);
    host_db->fill_database();
    //TODO: change num_batches -> is same as num_jsons
}
// Shrink the host database down to at most `max_jsons` batches, then rebuild
// the GPU manager so the device-resident copy matches the shrunken host data.
void codec::shrink_data(int max_jsons, int num_devices){
    host_db->shrink(max_jsons);
    set_gpu_user(num_devices);
}
void codec::set_gpu_user(int num_devices){
if (gpu_user != NULL)
gpu_user->_free();
gpu_user = new gpu_manager(num_devices, host_db->num_batches, data_size_per_batch, dimension);
gpu_user->copy_database_to_device(host_db->host_database_B, host_db->host_database_A,
host_db->host_database_prob_Y
);
}
// Run one query against the device-resident database: stage the query on the
// GPU manager, execute the search, and rank the results.
// The ranked ids are currently discarded — re-enable dump_json() below to
// persist them to disk.
void codec::search(float *host_query_B, float *host_query_A){
    gpu_user->add_query(host_query_B, host_query_A);
    gpu_user->search();
    std::vector<std::tuple<int, int, float>> top_prog_ids = gpu_user->top_k();
    (void)top_prog_ids; // silence unused-variable warning until dump_json is re-enabled
    // dump_json(top_prog_ids);
}
void codec::dump_json(std::vector<std::tuple<int, int, float>> top_prog_ids){
int i=0;
Json::Value op_program_head;
Json::Value op_prog_array(Json::arrayValue);
for(std::tuple<int,int, float> prog_id : top_prog_ids){
int batch_id = std::get<0>(prog_id);
int batch_prog_id = std::get<1>(prog_id);
float prob = std::get<2>(prog_id);
Program* p = host_db->get_program(batch_id, batch_prog_id);
i++;
Json::Value op_prog;
op_prog["Rank"] = i;
op_prog["Probability"] = prob;
op_prog["Body"] = p->get_body();
op_prog_array.append(op_prog);
//std::cout << " Rank :: " << i << std::endl;
//std::cout << " Probability :: " << prob << std::endl;
//std::cout << p->get_body() << std::endl;
}
op_program_head["top_programs"]=op_prog_array;
std::ofstream file_id;
file_id.open("top_programs.json");
Json::StyledWriter styledWriter;
file_id << styledWriter.write(op_program_head);
file_id.close();
}
// Cross-check the GPU search against the CPU reference: run the same query
// through cpu_user and report the relative error between the CPU and GPU
// result buffers.
// NOTE(review): cpu_user's construction is commented out in the constructor
// above — confirm it is created elsewhere before verify() is called,
// otherwise this dereferences an unset pointer.
void codec::verify(float *host_query_B, float *host_query_A){
    cpu_user->add_query(host_query_B, host_query_A);
    cpu_user->search();
    relative_error(cpu_user->get_result(), gpu_user->get_result(), data_size_per_batch);
}
// Release the buffers held by the host database and the GPU manager.
// NOTE(review): only internal buffers are released via each object's _free();
// the host_db/gpu_user objects themselves (allocated with new in this file)
// are never deleted — confirm whether that leak is intentional or handled by
// a destructor not visible here.
void codec::_free(){
    host_db->_free();
    gpu_user->_free();
    //cpu_user->_free();
}