diff --git a/README.md b/README.md
index 66be37bedc8..c7053431813 100644
--- a/README.md
+++ b/README.md
@@ -104,14 +104,16 @@ outputs = method.execute([torch.randn(1, 3, 224, 224)])
 Module module("model.pte");
 auto tensor = make_tensor_ptr({2, 2}, {1.0f, 2.0f, 3.0f, 4.0f});
-auto outputs = module.forward({tensor});
+auto outputs = module.forward(tensor);
 ```
 
 **[Swift (iOS)](https://docs.pytorch.org/executorch/main/ios-section.html)**
 
 ```swift
+import ExecuTorch
+
 let module = Module(filePath: "model.pte")
-let input = Tensor([1.0, 2.0, 3.0, 4.0])
-let outputs: [Value] = try module.forward([input])
+let input = Tensor([1.0, 2.0, 3.0, 4.0], shape: [2, 2])
+let outputs = try module.forward(input)
 ```
 
 **[Kotlin (Android)](https://docs.pytorch.org/executorch/main/android-section.html)**
@@ -151,6 +153,8 @@ runner->generate("Hello, how are you?", config);
 **[Swift (iOS)](https://docs.pytorch.org/executorch/main/llm/run-on-ios.html)**
 
 ```swift
+import ExecuTorchLLM
+
 let runner = TextRunner(modelPath: "llama.pte", tokenizerPath: "tiktoken.bin")
 try runner.generate("Hello, how are you?", Config {
   $0.sequenceLength = 128