Clean up syntax for supported Python versions. #1963

Merged: 5 commits, May 3, 2018

2 changes: 1 addition & 1 deletion examples/resnet/resnet_main.py
@@ -230,7 +230,7 @@ def train():
# testing task with the current weights every 200 steps.
acc = ray.get(acc_id)
acc_id = test_actor.accuracy.remote(weight_id, step)
print("Step {0}: {1:.6f}".format(step - 200, acc))
print("Step {}: {:.6f}".format(step - 200, acc))
except KeyboardInterrupt:
pass

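
Aside (not part of the diff): explicit field indices like {0}/{1} were only required on Python 2.6; from Python 2.7/3.1 onward, str.format auto-numbers empty {} fields left to right, so the two forms print the same text. A minimal sketch with made-up values:

# Hypothetical values; in resnet_main.py these come from the training loop.
step, acc = 400, 0.8731529

old = "Step {0}: {1:.6f}".format(step - 200, acc)  # explicit indices
new = "Step {}: {:.6f}".format(step - 200, acc)    # auto-numbered fields
assert old == new == "Step 200: 0.873153"
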
2 changes: 1 addition & 1 deletion python/ray/common/test/test.py
@@ -133,7 +133,7 @@ def test_hashability(self):
x = random_object_id()
y = random_object_id()
{x: y}
-set([x, y])
+{x, y}


class TestTask(unittest.TestCase):
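
Aside (illustrative, not from the PR): a set literal builds the same set as set([...]) without a throwaway intermediate list. The one thing a literal cannot express is the empty set, because {} is an empty dict:

x, y = "a", "b"  # stand-ins for the random object IDs used in the test

assert {x, y} == set([x, y])  # same set, no intermediate list
assert type({}) is dict       # caveat: {} is an empty dict, not a set,
empty = set()                 # so the empty set must still be spelled set()
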
2 changes: 1 addition & 1 deletion python/ray/dataframe/groupby.py
@@ -106,7 +106,7 @@ def tshift(self):

@property
def groups(self):
-return dict([(k, pd.Index(v)) for k, v in self._keys_and_values])
+return {k: pd.Index(v) for k, v in self._keys_and_values}

def min(self, **kwargs):
return self._apply_agg_function(lambda df: df.min(**kwargs))
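
Aside (hypothetical sample data, not from the PR): a dict comprehension yields the same mapping as dict() over a list of pairs, but skips materializing the list of tuples first:

import pandas as pd

# Hypothetical stand-in for the self._keys_and_values pairs in groupby.py.
keys_and_values = [("a", [1, 2]), ("b", [3])]

old = dict([(k, pd.Index(v)) for k, v in keys_and_values])
new = {k: pd.Index(v) for k, v in keys_and_values}

assert set(old) == set(new)
assert all(old[k].equals(new[k]) for k in old)  # identical Index values
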
2 changes: 1 addition & 1 deletion python/ray/experimental/tfutils.py
@@ -49,7 +49,7 @@ def __init__(self, loss, sess=None, input_variables=None):
self.sess = sess
queue = deque([loss])
variable_names = []
-explored_inputs = set([loss])
+explored_inputs = {loss}

# We do a BFS on the dependency graph of the input function to find
# the variables.
14 changes: 7 additions & 7 deletions python/ray/plasma/test/test.py
@@ -297,7 +297,7 @@ def test_wait(self):
self.client1.seal(obj_id1)
ready, waiting = self.client1.wait(
[obj_id1], timeout=100, num_returns=1)
-self.assertEqual(set(ready), set([obj_id1]))
+self.assertEqual(set(ready), {obj_id1})
self.assertEqual(waiting, [])

# Test wait if only one object available and only one object waited
@@ -307,8 +307,8 @@
# Don't seal.
ready, waiting = self.client1.wait(
[obj_id2, obj_id1], timeout=100, num_returns=1)
-self.assertEqual(set(ready), set([obj_id1]))
-self.assertEqual(set(waiting), set([obj_id2]))
+self.assertEqual(set(ready), {obj_id1})
+self.assertEqual(set(waiting), {obj_id2})

# Test wait if object is sealed later.
obj_id3 = random_object_id()
@@ -321,14 +321,14 @@ def finish():
t.start()
ready, waiting = self.client1.wait(
[obj_id3, obj_id2, obj_id1], timeout=1000, num_returns=2)
-self.assertEqual(set(ready), set([obj_id1, obj_id3]))
-self.assertEqual(set(waiting), set([obj_id2]))
+self.assertEqual(set(ready), {obj_id1, obj_id3})
+self.assertEqual(set(waiting), {obj_id2})

# Test if the appropriate number of objects is shown if some objects
# are not ready.
ready, waiting = self.client1.wait([obj_id3, obj_id2, obj_id1], 100, 3)
-self.assertEqual(set(ready), set([obj_id1, obj_id3]))
-self.assertEqual(set(waiting), set([obj_id2]))
+self.assertEqual(set(ready), {obj_id1, obj_id3})
+self.assertEqual(set(waiting), {obj_id2})

# Don't forget to seal obj_id2.
self.client1.seal(obj_id2)
16 changes: 8 additions & 8 deletions python/ray/tune/test/trial_scheduler_test.py
@@ -688,36 +688,36 @@ def assertProduces(fn, values):
# Categorical case
assertProduces(
lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
-set([3, 8]))
+{3, 8})
assertProduces(
lambda: explore({"v": 3}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
-set([3, 4]))
+{3, 4})
assertProduces(
lambda: explore({"v": 10}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
-set([8, 10]))
+{8, 10})
assertProduces(
lambda: explore({"v": 7}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
-set([3, 4, 8, 10]))
+{3, 4, 8, 10})
assertProduces(
lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 1.0, lambda x: x),
-set([3, 4, 8, 10]))
+{3, 4, 8, 10})

# Continuous case
assertProduces(
lambda: explore(
{"v": 100}, {"v": lambda: random.choice([10, 100])}, 0.0,
lambda x: x),
-set([80, 120]))
+{80, 120})
assertProduces(
lambda: explore(
{"v": 100.0}, {"v": lambda: random.choice([10, 100])}, 0.0,
lambda x: x),
-set([80.0, 120.0]))
+{80.0, 120.0})
assertProduces(
lambda: explore(
{"v": 100.0}, {"v": lambda: random.choice([10, 100])}, 1.0,
lambda x: x),
-set([10.0, 100.0]))
+{10.0, 100.0})

def testYieldsTimeToOtherTrials(self):
pbt, runner = self.basicSetup()
2 changes: 1 addition & 1 deletion python/ray/tune/trial_runner.py
@@ -172,7 +172,7 @@ def debug_string(self, max_debug=MAX_DEBUG_TRIALS):
if max_debug == start_num:
break

-for local_dir in sorted(set([t.local_dir for t in self._trials])):
+for local_dir in sorted({t.local_dir for t in self._trials}):
messages.append("Result logdir: {}".format(local_dir))
for state, trials in sorted(states.items()):
limit = limit_per_state[state]
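
Aside (made-up directories, not from the PR): a set comprehension deduplicates directly, so sorted({...}) matches sorted(set([...])) while skipping the intermediate list:

# Hypothetical trial-like records; trial_runner.py reads t.local_dir instead.
trials = [{"local_dir": "/tmp/ray/exp1"},
          {"local_dir": "/tmp/ray/exp2"},
          {"local_dir": "/tmp/ray/exp1"}]  # duplicate on purpose

old = sorted(set([t["local_dir"] for t in trials]))
new = sorted({t["local_dir"] for t in trials})
assert old == new == ["/tmp/ray/exp1", "/tmp/ray/exp2"]
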
8 changes: 5 additions & 3 deletions python/ray/worker.py
@@ -464,9 +464,11 @@ def get_object(self, object_ids):
final_results = self.retrieve_and_deserialize(plain_object_ids, 0)
# Construct a dictionary mapping object IDs that we haven't gotten yet
# to their original index in the object_ids argument.
-unready_ids = dict((plain_object_ids[i].binary(), i)
-                   for (i, val) in enumerate(final_results)
-                   if val is plasma.ObjectNotAvailable)
+unready_ids = {
+    plain_object_ids[i].binary(): i
+    for (i, val) in enumerate(final_results)
+    if val is plasma.ObjectNotAvailable
+}
was_blocked = (len(unready_ids) > 0)
# Try reconstructing any objects we haven't gotten yet. Try to get them
# until at least get_timeout_milliseconds milliseconds passes, then
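
A self-contained sketch of the same shape (a sentinel stands in for plasma.ObjectNotAvailable, which needs a running object store):

# Sentinel standing in for plasma.ObjectNotAvailable in worker.py above.
NOT_AVAILABLE = object()
plain_object_ids = ["id-0", "id-1", "id-2", "id-3"]
final_results = ["obj-a", NOT_AVAILABLE, "obj-c", NOT_AVAILABLE]

# Map each still-missing ID back to its position in the original request,
# filtering inside the comprehension instead of feeding dict() a generator.
unready_ids = {
    plain_object_ids[i]: i
    for (i, val) in enumerate(final_results)
    if val is NOT_AVAILABLE
}
assert unready_ids == {"id-1": 1, "id-3": 3}
was_blocked = len(unready_ids) > 0
assert was_blocked
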
18 changes: 9 additions & 9 deletions test/actor_test.py
@@ -774,7 +774,7 @@ def get_location_and_ids(self):
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get(
[actor.get_location_and_ids.remote() for actor in actors])
-node_names = set([location for location, gpu_id in locations_and_ids])
+node_names = {location for location, gpu_id in locations_and_ids}
self.assertEqual(len(node_names), num_local_schedulers)
location_actor_combinations = []
for node_name in node_names:
@@ -815,7 +815,7 @@ def get_location_and_ids(self):
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get(
[actor.get_location_and_ids.remote() for actor in actors1])
-node_names = set([location for location, gpu_id in locations_and_ids])
+node_names = {location for location, gpu_id in locations_and_ids}
self.assertEqual(len(node_names), num_local_schedulers)

# Keep track of which GPU IDs are being used for each location.
@@ -847,9 +847,9 @@ def get_location_and_ids(self):
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get(
[actor.get_location_and_ids.remote() for actor in actors2])
-self.assertEqual(
-    node_names,
-    set([location for location, gpu_id in locations_and_ids]))
+self.assertEqual(node_names,
+                 {location
+                  for location, gpu_id in locations_and_ids})
for location, gpu_ids in locations_and_ids:
gpus_in_use[location].extend(gpu_ids)
for node_name in node_names:
@@ -887,7 +887,7 @@ def get_location_and_ids(self):
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get(
[actor.get_location_and_ids.remote() for actor in actors])
-node_names = set([location for location, gpu_id in locations_and_ids])
+node_names = {location for location, gpu_id in locations_and_ids}
self.assertEqual(len(node_names), 2)
for node_name in node_names:
node_gpu_ids = [
@@ -896,8 +896,8 @@ def get_location_and_ids(self):
]
self.assertIn(len(node_gpu_ids), [5, 10])
self.assertEqual(
-    set(node_gpu_ids),
-    set([(i, ) for i in range(len(node_gpu_ids))]))
+    set(node_gpu_ids), {(i, )
+                        for i in range(len(node_gpu_ids))})

# Creating a new actor should fail because all of the GPUs are being
# used.
@@ -1942,7 +1942,7 @@ def method(self):

results = ray.get([result1, result2, result3])
self.assertEqual(results[0], results[2])
-self.assertEqual(set(results), set([0, 1]))
+self.assertEqual(set(results), {0, 1})

# Make sure that when one actor goes out of scope a new actor is
# created because some resources have been freed up.
6 changes: 3 additions & 3 deletions test/runtest.py
@@ -255,7 +255,7 @@ def temp():

# Test sets.
self.assertEqual(ray.get(f.remote(set())), set())
-s = set([1, (1, 2, "hi")])
+s = {1, (1, 2, "hi")}
self.assertEqual(ray.get(f.remote(s)), s)

# Test types.
@@ -1317,8 +1317,8 @@ def f():
self.assertEqual(list_of_ids, 10 * [[]])

list_of_ids = ray.get([f1.remote() for _ in range(10)])
-set_of_ids = set([tuple(gpu_ids) for gpu_ids in list_of_ids])
-self.assertEqual(set_of_ids, set([(i, ) for i in range(10)]))
+set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
+self.assertEqual(set_of_ids, {(i, ) for i in range(10)})

list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
6 changes: 4 additions & 2 deletions test/stress_tests.py
@@ -210,8 +210,10 @@ def tearDown(self):
state._initialize_global_state(self.redis_ip_address, self.redis_port)
if os.environ.get('RAY_USE_NEW_GCS', False):
tasks = state.task_table()
-local_scheduler_ids = set(
-    task["LocalSchedulerID"] for task in tasks.values())
+local_scheduler_ids = {
+    task["LocalSchedulerID"]
+    for task in tasks.values()
+}

# Make sure that all nodes in the cluster were used by checking that
# the set of local scheduler IDs that had a task scheduled or submitted
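
One last aside (hypothetical task records, not from the PR): set() around a generator expression and a set comprehension build identical sets; the comprehension simply drops the extra call:

# Hypothetical entries shaped like the task table read in stress_tests.py.
tasks = {
    "task-1": {"LocalSchedulerID": "scheduler-a"},
    "task-2": {"LocalSchedulerID": "scheduler-b"},
    "task-3": {"LocalSchedulerID": "scheduler-a"},
}

old = set(task["LocalSchedulerID"] for task in tasks.values())
new = {task["LocalSchedulerID"] for task in tasks.values()}
assert old == new == {"scheduler-a", "scheduler-b"}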