Commit 1c7a366
Add up_if_not to clusters in examples (#999)
carolineechen authored Jul 12, 2024
1 parent e1f546a commit 1c7a366
Showing 5 changed files with 7 additions and 6 deletions.
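For reference, the pattern this commit applies in each file below: chaining `.up_if_not()` onto `rh.cluster(...)` brings the cluster up only when it is not already running (as the method name suggests), so re-running an example reuses a live cluster instead of assuming one exists. A minimal sketch of the resulting usage; the cluster name and instance type mirror the examples, and any cluster config takes the same chained call:

import runhouse as rh

# Launch the cluster if it is not already up; otherwise reuse the running one.
gpu = rh.cluster(
    name="rh-a10x",          # cluster name used across these examples
    instance_type="A10G:1",  # a single A10G GPU
    provider="aws",
).up_if_not()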
2 changes: 1 addition & 1 deletion examples/llama2-13b-ec2/llama2_ec2.py
@@ -92,7 +92,7 @@ def predict(self, prompt_text, **inf_kwargs):
 # the script code will run when Runhouse attempts to run code remotely.
 # :::
 if __name__ == "__main__":
-    gpu = rh.cluster(name="rh-a10x", instance_type="A10G:1", provider="aws")
+    gpu = rh.cluster(name="rh-a10x", instance_type="A10G:1", provider="aws").up_if_not()

     # Next, we define the environment for our module. This includes the required dependencies that need
     # to be installed on the remote machine, as well as any secrets that need to be synced up from local to remote.
5 changes: 3 additions & 2 deletions examples/llama2-fine-tuning-with-lora/llama2_fine_tuning.py
@@ -220,8 +220,9 @@ def generate(self, query: str, max_length: int = DEFAULT_MAX_LENGTH):
 # the script code will run when Runhouse attempts to run code remotely.
 # :::
 if __name__ == "__main__":
-    cluster = rh.cluster(name="rh-a10x", instance_type="A10G:1", provider="aws")
-
+    cluster = rh.cluster(
+        name="rh-a10x", instance_type="A10G:1", provider="aws"
+    ).up_if_not()
     # Next, we define the environment for our module. This includes the required dependencies that need
     # to be installed on the remote machine, as well as any secrets that need to be synced up from local to remote.
     # Passing `huggingface` to the `secrets` parameter will load the Hugging Face token we set up earlier.
2 changes: 1 addition & 1 deletion examples/llama3-8b-ec2/llama3_ec2.py
@@ -108,7 +108,7 @@ def predict(self, prompt_text, **inf_kwargs):
 if __name__ == "__main__":
     gpu = rh.cluster(
         name="rh-a10x", instance_type="A10G:1", memory="32+", provider="aws"
-    )
+    ).up_if_not()

     # Next, we define the environment for our module. This includes the required dependencies that need
     # to be installed on the remote machine, as well as any secrets that need to be synced up from local to remote.
2 changes: 1 addition & 1 deletion examples/llama3-fine-tuning-lora/llama3_fine_tuning.py
@@ -245,7 +245,7 @@ def generate(self, query: str, max_length: int = DEFAULT_MAX_LENGTH):
         instance_type="A10G:1",
         memory="32+",
         provider="aws",
-    )
+    ).up_if_not()

     # Next, we define the environment for our module. This includes the required dependencies that need
     # to be installed on the remote machine, as well as any secrets that need to be synced up from local to remote.
2 changes: 1 addition & 1 deletion examples/llama3-vllm-gcp/llama3_vllm_gcp.py
@@ -106,7 +106,7 @@ async def main():
         # open_ports=[443], # Expose HTTPS port to public
         # server_connection_type="tls", # Specify how runhouse communicates with this cluster
         # den_auth=False, # No authentication required to hit this cluster (NOT recommended)
-    )
+    ).up_if_not()

     # We'll set an `autostop_mins` of 30 for this example. If you'd like your cluster to run indefinitely, set `autostop_mins=-1`.
     # You can use SkyPilot in the terminal to manage your active clusters with `sky status` and `sky down <cluster_id>`.
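The GCP example above also mentions an auto-stop window. A hypothetical sketch of how `autostop_mins` could sit alongside the same pattern; the parameter name comes from the comment in the hunk above, and the rest of the config is illustrative:

import runhouse as rh

# Assumed usage: stop the cluster after 30 idle minutes; -1 would keep it up indefinitely.
gpu = rh.cluster(
    name="rh-a10x",
    instance_type="A10G:1",
    provider="gcp",
    autostop_mins=30,  # assumption: passed as a cluster kwarg, per the comment above
).up_if_not()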
