This repository was archived by the owner on May 11, 2024. It is now read-only.

Commit a0acad6

Enable setting the port on the server (#54)
* set port for the server
* add port number for the server in the example
* seal backend: add port for server
* update examples
* do not stop server after one query
* Update README.md: correct port number for the server
1 parent: be835ad · commit: a0acad6

8 files changed (+60 −15 lines)

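Taken together, the eight diffs below thread a new `--port` flag (default 34000) from the Python examples down to the HE-SEAL backend: each example encodes the flag as a string-valued `"port"` entry in its server config, `HESealBackend::set_config` parses it back into `m_port`, and `HESealExecutable` picks it up through the new `port()` getter. A minimal, illustrative Python sketch of the encoding step (not part of the commit; the real examples attach the entry to a full TensorFlow session config):

```python
# Illustrative sketch only: how the examples hand the new --port flag to the
# backend as a string-valued config entry (see server_config_from_flags below).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--port", type=int, default=34000, help="Port number for the server")
FLAGS, _ = parser.parse_known_args([])

# Every backend option is stored as an encoded string in a parameter map;
# the new "port" entry follows the same pattern.
parameter_map = {"port": str(FLAGS.port).encode()}
print(parameter_map)  # {'port': b'34000'}

# On the C++ side, HESealBackend::set_config() converts the string back to an
# integer (flag_to_int) and stores it in m_port; HESealExecutable reads it via
# the new port() getter.
```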

examples/MNIST/README.md

+4 −2

````diff
@@ -69,13 +69,15 @@ To test the client-server model, in one terminal call
 python test.py --backend=HE_SEAL \
 --model_file=models/cryptonets.pb \
 --enable_client=true \
---encryption_parameters=$HE_TRANSFORMER/configs/he_seal_ckks_config_N13_L8.json
+--encryption_parameters=$HE_TRANSFORMER/configs/he_seal_ckks_config_N13_L8.json \
+--port 35000
 ```
 
 In another terminal (with the python environment active), call
 ```bash
 python pyclient_mnist.py --batch_size=1024 \
---encrypt_data_str=encrypt
+--encrypt_data_str=encrypt \
+--port 35000
 ```
 
 ### Multi-party computation with garbled circuits (GC)
````

examples/MNIST/mnist_util.py

+7

```diff
@@ -235,6 +235,12 @@ def server_argument_parser():
         default="import/output/BiasAdd:0",
         help="Tensor name of model output",
     )
+    parser.add_argument(
+        "--port",
+        type=int,
+        default=34000,
+        help="Port number for the server",
+    )
 
     return parser
 
@@ -258,6 +264,7 @@ def server_config_from_flags(FLAGS, tensor_param_name):
         FLAGS.mask_gc_outputs)).encode()
     server_config.parameter_map["num_gc_threads"].s = (str(
         FLAGS.num_gc_threads)).encode()
+    server_config.parameter_map["port"].s = (str(FLAGS.port)).encode()
 
     if FLAGS.enable_client:
         server_config.parameter_map[tensor_param_name].s = b"client_input"
```
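Any example that builds its flags through `server_argument_parser()` now picks up the port automatically and forwards it with the rest of the configuration. A hedged usage sketch (the tensor parameter name below is illustrative, not taken from this commit):

```python
# Hypothetical usage of the updated helpers in examples/MNIST/mnist_util.py.
from mnist_util import server_argument_parser, server_config_from_flags

parser = server_argument_parser()   # now also understands --port (default 34000)
FLAGS, unparsed = parser.parse_known_args()

# "import/input:0" is an illustrative tensor parameter name for this sketch.
config = server_config_from_flags(FLAGS, "import/input:0")
# The returned session config now carries a "port" entry in its parameter map,
# telling the HE_SEAL backend which port to listen on for the client.
```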

examples/README.md

+3 −2

````diff
@@ -32,13 +32,14 @@ For a simple demonstration of a server-client approach, run
 ```bash
 python $HE_TRANSFORMER/examples/ax.py \
 --backend=HE_SEAL \
---enable_client=yes
+--enable_client=yes \
+--port 35000
 ```
 
 This will discard the Tensorflow inputs and instead wait for a client to connect and provide encrypted inputs.
 To start the client, in a separate terminal on the same host (with the ngraph-tf bridge python environment active), run
 ```bash
-python $HE_TRANSFORMER/examples/pyclient.py
+python $HE_TRANSFORMER/examples/pyclient.py --port 35000
 ```
 
 Once the computation is complete, the output will be returned to the client and decrypted. The server will attempt decrypt the output as well; however, since it does not have the client's secret key, the output will be meaningless.
````
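The only new requirement is that server and client agree on the port. A hedged sketch of the client side after this change (the values mirror `pyclient.py`; hostname and port are examples):

```python
# Illustrative client-side sketch, not the full example script.
import argparse
import pyhe_client

parser = argparse.ArgumentParser()
parser.add_argument("--hostname", type=str, default="localhost")
parser.add_argument("--port", type=int, default=34000)
FLAGS, _ = parser.parse_known_args()

client = pyhe_client.HESealClient(
    FLAGS.hostname,      # host the ax.py server runs on
    FLAGS.port,          # must match the server's --port (e.g. 35000 above)
    1,                   # batch size
    {"client_parameter_name": ("encrypt", (2, 4, 6, 8))})
```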

examples/ax.py

+22 −7

```diff
@@ -44,6 +44,7 @@ def server_config_from_flags(FLAGS, tensor_param_name):
         "encryption_parameters"].s = FLAGS.encryption_parameters.encode()
     server_config.parameter_map["enable_client"].s = (str(
         FLAGS.enable_client)).encode()
+    server_config.parameter_map["port"].s = (str(FLAGS.port)).encode()
     if FLAGS.enable_client:
         server_config.parameter_map[tensor_param_name].s = b"client_input"
 
@@ -64,18 +65,26 @@ def main(FLAGS):
     c = tf.compat.v1.placeholder(tf.float32, shape=(1, 4))
     f = c * (a + b)
 
-    # Create config to load parameter b from client
-    config = server_config_from_flags(FLAGS, b.name)
-    print("config", config)
+    try:
+        while True:
+            # Create config to load parameter b from client
+            config = server_config_from_flags(FLAGS, b.name)
+            print("config", config)
 
-    with tf.compat.v1.Session(config=config) as sess:
-        f_val = sess.run(f, feed_dict={b: np.ones((1, 4)), c: np.ones((1, 4))})
-        print("Result: ", f_val)
+            with tf.compat.v1.Session(config=config) as sess:
+                f_val = sess.run(f, feed_dict={b: np.ones((1, 4)), c: np.ones((1, 4))})
+                print("Result: ", f_val)
+    except KeyboardInterrupt:
+        print("Stopping the server.")
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
+    parser.add_argument(
+        "--batch_size",
+        type=int,
+        default=1,
+        help="Batch size")
     parser.add_argument(
         "--enable_client",
         type=str2bool,
@@ -90,6 +99,12 @@ def main(FLAGS):
         help=
         "Filename containing json description of encryption parameters, or json description itself",
     )
+    parser.add_argument(
+        "--port",
+        type=int,
+        default=34000,
+        help="Port number for the server",
+    )
 
     FLAGS, unparsed = parser.parse_known_args()
     main(FLAGS)
```
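Besides wiring up the flag, the second hunk changes the example's lifetime: the config-and-session block now runs inside a `while True` loop guarded by `except KeyboardInterrupt`, so the server keeps answering queries until it is interrupted rather than exiting after one (the "do not stop server after one query" item in the commit message). A stripped-down sketch of that pattern, with a stub standing in for the real Session block:

```python
# Minimal sketch of the serve-until-interrupted pattern adopted in ax.py;
# handle_one_request() is a hypothetical stand-in for rebuilding the config
# and running one tf.compat.v1.Session over it.
import time

def handle_one_request():
    time.sleep(1)  # placeholder for one client query

try:
    while True:
        handle_one_request()
except KeyboardInterrupt:
    print("Stopping the server.")
```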

examples/pyclient.py

+11 −3

```diff
@@ -22,10 +22,9 @@
 def main(FLAGS):
     data = (2, 4, 6, 8)
 
-    port = 34000
     batch_size = 1
 
-    client = pyhe_client.HESealClient(FLAGS.hostname, port, batch_size, {
+    client = pyhe_client.HESealClient(FLAGS.hostname, FLAGS.port, batch_size, {
         "client_parameter_name": ("encrypt", data)
     })
 
@@ -36,7 +35,16 @@ def main(FLAGS):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        "--hostname", type=str, default="localhost", help="Hostname of server")
+        "--hostname",
+        type=str,
+        default="localhost",
+        help="Hostname of server")
+    parser.add_argument(
+        "--port",
+        type=int,
+        default=34000,
+        help="Port number of server",
+    )
 
     FLAGS, unparsed = parser.parse_known_args()
 
```
src/seal/he_seal_backend.cpp

+5

```diff
@@ -83,6 +83,7 @@ bool HESealBackend::set_config(const std::map<std::string, std::string>& config,
   (void)error;  // Avoid unused parameter warning
   NGRAPH_HE_LOG(3) << "Setting config";
   for (const auto& [option, setting] : config) {
+    NGRAPH_HE_LOG(3) << "option name: <" << option << ">";
     // Check whether client is enabled
     if (option == "enable_client") {
       bool client_enabled = string_to_bool(setting, false);
@@ -117,6 +118,9 @@ bool HESealBackend::set_config(const std::map<std::string, std::string>& config,
       } else {
         NGRAPH_HE_LOG(3) << "Not masking garbled circuits outputs from config";
       }
+    } else if (option == "port") {
+      m_port = flag_to_int(setting.c_str(), 34000);
+      NGRAPH_HE_LOG(3) << "Setting " << m_port << " port number";
     } else {
       std::string lower_option = to_lower(option);
       std::vector<std::string> lower_settings = split(to_lower(setting), ',');
@@ -128,6 +132,7 @@ bool HESealBackend::set_config(const std::map<std::string, std::string>& config,
 
       static std::unordered_set<std::string> valid_config_settings{
           "client_input", "encrypt", "packed", ""};
+
       for (const auto& lower_setting : lower_settings) {
         NGRAPH_CHECK(valid_config_settings.find(lower_setting) !=
                          valid_config_settings.end(),
```
src/seal/he_seal_backend.hpp

+6

```diff
@@ -268,6 +268,11 @@ class HESealBackend : public runtime::Backend {
     return m_num_garbled_circuit_threads;
   }
 
+  /// \brief Returns the port number used for the server
+  size_t port() const {
+    return m_port;
+  }
+
   /// \brief Returns whether or not the garbled circuit inputs should be masked
   /// for privacy
   bool mask_gc_inputs() const { return m_mask_gc_inputs; }
@@ -328,6 +333,7 @@ class HESealBackend : public runtime::Backend {
   bool m_mask_gc_inputs{false};
   bool m_mask_gc_outputs{false};
   size_t m_num_garbled_circuit_threads{1};
+  size_t m_port{34000};
 
   bool m_lazy_mod{string_to_bool(std::getenv("LAZY_MOD"), false)};
 
```

src/seal/he_seal_executable.cpp

+2 −1

```diff
@@ -88,11 +88,12 @@ namespace ngraph::runtime::he {
 HESealExecutable::HESealExecutable(const std::shared_ptr<Function>& function,
                                    bool enable_performance_collection,
                                    HESealBackend& he_seal_backend)
-    : m_he_seal_backend(he_seal_backend), m_batch_size{1}, m_port{34000} {
+    : m_he_seal_backend(he_seal_backend), m_batch_size{1} {
   // TODO(fboemer): Use
   (void)enable_performance_collection;  // Avoid unused parameter warning
 
   m_context = he_seal_backend.get_context();
+  m_port = he_seal_backend.port();
   m_function = function;
 
   if (!m_context->using_keyswitching()) {
```
