From 8f478f39a5f2d43b88b5ec7081e5f5b478ed8553 Mon Sep 17 00:00:00 2001
From: Caroline
Date: Fri, 12 Jul 2024 17:25:47 -0400
Subject: [PATCH] add up_if_not to clusters in examples

---
 examples/llama2-13b-ec2/llama2_ec2.py                       | 2 +-
 examples/llama2-fine-tuning-with-lora/llama2_fine_tuning.py | 5 +++--
 examples/llama3-8b-ec2/llama3_ec2.py                        | 2 +-
 examples/llama3-fine-tuning-lora/llama3_fine_tuning.py      | 2 +-
 examples/llama3-vllm-gcp/llama3_vllm_gcp.py                 | 2 +-
 5 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/examples/llama2-13b-ec2/llama2_ec2.py b/examples/llama2-13b-ec2/llama2_ec2.py
index f91a3e3b8..658427841 100644
--- a/examples/llama2-13b-ec2/llama2_ec2.py
+++ b/examples/llama2-13b-ec2/llama2_ec2.py
@@ -92,7 +92,7 @@ def predict(self, prompt_text, **inf_kwargs):
 # the script code will run when Runhouse attempts to run code remotely.
 # :::
 if __name__ == "__main__":
-    gpu = rh.cluster(name="rh-a10x", instance_type="A10G:1", provider="aws")
+    gpu = rh.cluster(name="rh-a10x", instance_type="A10G:1", provider="aws").up_if_not()
 
     # Next, we define the environment for our module. This includes the required dependencies that need
     # to be installed on the remote machine, as well as any secrets that need to be synced up from local to remote.
diff --git a/examples/llama2-fine-tuning-with-lora/llama2_fine_tuning.py b/examples/llama2-fine-tuning-with-lora/llama2_fine_tuning.py
index 92ff7e1bb..ad8a6ffd8 100644
--- a/examples/llama2-fine-tuning-with-lora/llama2_fine_tuning.py
+++ b/examples/llama2-fine-tuning-with-lora/llama2_fine_tuning.py
@@ -220,8 +220,9 @@ def generate(self, query: str, max_length: int = DEFAULT_MAX_LENGTH):
 # the script code will run when Runhouse attempts to run code remotely.
 # :::
 if __name__ == "__main__":
-    cluster = rh.cluster(name="rh-a10x", instance_type="A10G:1", provider="aws")
-
+    cluster = rh.cluster(
+        name="rh-a10x", instance_type="A10G:1", provider="aws"
+    ).up_if_not()
     # Next, we define the environment for our module. This includes the required dependencies that need
     # to be installed on the remote machine, as well as any secrets that need to be synced up from local to remote.
     # Passing `huggingface` to the `secrets` parameter will load the Hugging Face token we set up earlier.
diff --git a/examples/llama3-8b-ec2/llama3_ec2.py b/examples/llama3-8b-ec2/llama3_ec2.py
index f34ac2adb..35ea6a608 100644
--- a/examples/llama3-8b-ec2/llama3_ec2.py
+++ b/examples/llama3-8b-ec2/llama3_ec2.py
@@ -108,7 +108,7 @@ def predict(self, prompt_text, **inf_kwargs):
 if __name__ == "__main__":
     gpu = rh.cluster(
         name="rh-a10x", instance_type="A10G:1", memory="32+", provider="aws"
-    )
+    ).up_if_not()
 
     # Next, we define the environment for our module. This includes the required dependencies that need
     # to be installed on the remote machine, as well as any secrets that need to be synced up from local to remote.
diff --git a/examples/llama3-fine-tuning-lora/llama3_fine_tuning.py b/examples/llama3-fine-tuning-lora/llama3_fine_tuning.py
index 51bb4023f..3907cf0fe 100644
--- a/examples/llama3-fine-tuning-lora/llama3_fine_tuning.py
+++ b/examples/llama3-fine-tuning-lora/llama3_fine_tuning.py
@@ -245,7 +245,7 @@ def generate(self, query: str, max_length: int = DEFAULT_MAX_LENGTH):
         instance_type="A10G:1",
         memory="32+",
         provider="aws",
-    )
+    ).up_if_not()
 
     # Next, we define the environment for our module. This includes the required dependencies that need
     # to be installed on the remote machine, as well as any secrets that need to be synced up from local to remote.
diff --git a/examples/llama3-vllm-gcp/llama3_vllm_gcp.py b/examples/llama3-vllm-gcp/llama3_vllm_gcp.py
index d68cce4e3..92e07842d 100644
--- a/examples/llama3-vllm-gcp/llama3_vllm_gcp.py
+++ b/examples/llama3-vllm-gcp/llama3_vllm_gcp.py
@@ -106,7 +106,7 @@ async def main():
         # open_ports=[443],  # Expose HTTPS port to public
         # server_connection_type="tls",  # Specify how runhouse communicates with this cluster
         # den_auth=False,  # No authentication required to hit this cluster (NOT recommended)
-    )
+    ).up_if_not()
 
     # We'll set an `autostop_mins` of 30 for this example. If you'd like your cluster to run indefinitely, set `autostop_mins=-1`.
     # You can use SkyPilot in the terminal to manage your active clusters with `sky status` and `sky down <cluster-name>`.
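For context, the pattern this patch standardizes is shown in the minimal sketch below (assuming `runhouse` is installed and AWS credentials are configured locally; the cluster name and instance type are taken from the examples above). Chaining `.up_if_not()` onto the cluster factory brings the cluster up only if it is not already running, so re-running an example script reuses a live cluster rather than assuming one already exists.

import runhouse as rh

# Sketch of the pattern applied across the examples: the factory call
# defines the cluster, and `.up_if_not()` provisions it on the first
# run while leaving an already-running cluster untouched afterwards.
gpu = rh.cluster(
    name="rh-a10x",          # cluster name used in the examples above
    instance_type="A10G:1",  # a single NVIDIA A10G GPU
    provider="aws",
).up_if_not()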