Showing 7 changed files with 425 additions and 8 deletions.
@@ -0,0 +1,78 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2023 The OpenRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Use OpenRL to load a stable-baselines model for testing."""

import numpy as np
import torch

from openrl.configs.config import create_config_parser
from openrl.envs.common import make
from openrl.modules.common.ppo_net import PPONet as Net
from openrl.modules.networks.policy_value_network_sb3 import (
    PolicyValueNetworkSB3 as PolicyValueNetwork,
)
from openrl.runners.common import PPOAgent as Agent


def evaluation(local_trained_file_path=None):
    # begin to test
    cfg_parser = create_config_parser()
    cfg = cfg_parser.parse_args(["--config", "ppo.yaml"])

    # Create a testing environment with 9 parallel environments.
    # Rendering is disabled; set render_mode to "group_human" to watch the agent.
    render_mode = None
    env = make("CartPole-v1", render_mode=render_mode, env_num=9, asynchronous=True)
    # Use the stable-baselines-compatible network as the policy-value model.
    model_dict = {"model": PolicyValueNetwork}
    net = Net(
        env,
        cfg=cfg,
        model_dict=model_dict,
        device="cuda" if torch.cuda.is_available() else "cpu",
    )
    # initialize the agent
    agent = Agent(net)
    if local_trained_file_path is not None:
        agent.load(local_trained_file_path)
    # The trained agent sets up the interactive environment it needs.
    agent.set_env(env)
    # Initialize the environment and get initial observations and environmental information.
    obs, info = env.reset()
    done = False

    total_step = 0
    total_reward = 0.0
    while not np.any(done):
        # Based on environmental observation input, predict the next action.
        action, _ = agent.act(obs, deterministic=True)
        obs, r, done, info = env.step(action)
        total_step += 1
        total_reward += np.mean(r)
        if total_step % 50 == 0:
            print(f"{total_step}: reward: {np.mean(r)}")
    env.close()
    print("total step:", total_step)
    print("total reward:", total_reward)


if __name__ == "__main__":
    evaluation()
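The script above can also be pointed at a trained checkpoint through its single argument. A minimal usage sketch, assuming the file is saved as test_model.py (the name the training script below imports it by) and that "./ppo_model.zip" is a hypothetical checkpoint path:

from test_model import evaluation

# "./ppo_model.zip" is a hypothetical checkpoint path; agent.load() restores
# the trained weights before the evaluation rollout starts.
evaluation(local_trained_file_path="./ppo_model.zip")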
@@ -0,0 +1,86 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2023 The OpenRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Train a PPO agent on an envpool environment with OpenRL, then evaluate it."""
import numpy as np

from openrl.configs.config import create_config_parser
from openrl.envs.common import make
from openrl.envs.wrappers.envpool_wrappers import VecAdapter, VecMonitor
from openrl.modules.common.ppo_net import PPONet as Net
from openrl.runners.common import PPOAgent as Agent


def train():
    # create the neural network
    cfg_parser = create_config_parser()
    cfg = cfg_parser.parse_args()

    # create the environment, with 9 parallel environments
    env = make(
        "envpool:Adventure-v5",
        render_mode=None,
        env_num=9,
        asynchronous=False,
        env_wrappers=[VecAdapter, VecMonitor],
        env_type="gym",
    )

    net = Net(
        env,
        cfg=cfg,
    )
    # initialize the agent
    agent = Agent(net, use_wandb=False, project_name="envpool:Adventure-v5")
    # start training, set total number of training steps to 20000
    agent.train(total_time_steps=20000)

    env.close()
    return agent


def evaluation(agent):
    # begin to test
    # Create a testing environment with 9 parallel environments, matching the
    # environment the agent was trained on (envpool does not support render_mode).
    env = make(
        "envpool:Adventure-v5",
        render_mode=None,
        env_num=9,
        asynchronous=False,
        env_wrappers=[VecAdapter, VecMonitor],
        env_type="gym",
    )
    # The trained agent sets up the interactive environment it needs.
    agent.set_env(env)
    # Initialize the environment and get initial observations and environmental information.
    obs, info = env.reset()
    done = False

    total_step = 0
    total_reward = 0.0
    while not np.any(done):
        # Based on environmental observation input, predict the next action.
        action, _ = agent.act(obs, deterministic=True)
        obs, r, done, info = env.step(action)
        total_step += 1
        total_reward += np.mean(r)
        if total_step % 50 == 0:
            print(f"{total_step}: reward: {np.mean(r)}")
    env.close()
    print("total step:", total_step)
    print("total reward:", total_reward)


if __name__ == "__main__":
    agent = train()
    evaluation(agent)
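The trained agent above lives only in memory for the duration of the run. If the weights should survive the process, a save step can slot in between training and evaluation; a sketch, assuming PPOAgent exposes save() as the counterpart of the load() used in test_model.py, with a hypothetical output directory:

if __name__ == "__main__":
    agent = train()
    # "./ppo_agent/" is a hypothetical output directory; assumes
    # PPOAgent.save() mirrors the agent.load() call used for evaluation.
    agent.save("./ppo_agent/")
    evaluation(agent)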
@@ -0,0 +1,47 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2023 The OpenRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Helpers for building envpool-based environments."""
from typing import List, Optional, Union

import envpool

from openrl.envs.common import build_envs


def make_envpool_envs(
    id: str,
    env_num: int = 1,
    render_mode: Optional[Union[str, List[str]]] = None,
    **kwargs,
):
    assert "env_type" in kwargs
    assert kwargs.get("env_type") in ["gym", "dm", "gymnasium"]
    # envpool does not support render_mode, so mark this as an envpool
    # environment so that build_envs drops the render_mode keyword argument.
    assert render_mode is None, "envpool does not support render_mode yet"
    kwargs["envpool"] = True

    env_wrappers = kwargs.pop("env_wrappers")
    env_fns = build_envs(
        make=envpool.make,
        id=id,
        env_num=env_num,
        render_mode=render_mode,
        wrappers=env_wrappers,
        **kwargs,
    )
    return env_fns
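make_envpool_envs returns a list of environment factories rather than live environments, leaving instantiation to the caller. A minimal sketch under that assumption (argument values are illustrative, and each entry of env_fns is assumed to be a zero-argument callable, the usual vector-env convention):

# Build two Adventure-v5 factories and instantiate them.
env_fns = make_envpool_envs(
    id="Adventure-v5",
    env_num=2,
    env_type="gym",
    env_wrappers=[],
)
envs = [env_fn() for env_fn in env_fns]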