diff --git a/docs-go/flows.md b/docs-go/flows.md
index 12d176acd..95a93fe33 100644
--- a/docs-go/flows.md
+++ b/docs-go/flows.md
@@ -156,16 +156,23 @@ first.
 			// ...
 		},
 	)
-	genkit.StartFlowServer(":1234")
+	err := genkit.StartFlowServer(":1234", []string{})
+
+	// StartFlowServer always returns a non-nil error: the one returned by
+	// http.ListenAndServe.
+	log.Fatal(err)
 }
 ```
 
-`StartFlowsServer` starts a `net/http` server that exposes each of the flows
-you defined as HTTP endpoints
-(for example, `http://localhost:3400/menuSuggestionFlow`).
-You can optionally specify the address and port to listen on. If you don't,
-the server listens on any address and the port specified by the PORT
-environment variable; if that is empty, it uses the default of port 3400.
+`StartFlowServer` starts a `net/http` server that exposes your flows as HTTP
+endpoints (for example, `http://localhost:3400/menuSuggestionFlow`). Both
+parameters are optional:
+
+- You can specify the address and port to listen on. If you don't,
+  the server listens on any address and the port specified by the PORT
+  environment variable; if that is empty, it uses the default of port 3400.
+- You can specify which flows to serve. If you don't, `StartFlowServer`
+  serves all of your defined flows.
 
 If you want to serve flows on the same host and port as other endpoints, you
 can call `NewFlowServeMux()` to get a handler for your Genkit flows, which you
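A minimal sketch of the `NewFlowServeMux()` pattern described above, assuming `genkit.NewFlowServeMux` accepts the same optional list of flow names as `StartFlowServer` and returns a handler (the exact signature is not shown in these docs):

```go
package main

import (
	"log"
	"net/http"

	"github.com/firebase/genkit/go/genkit"
)

func main() {
	// ... define your flows with genkit.DefineFlow ...

	mux := http.NewServeMux()

	// An existing, non-Genkit endpoint served on the same host and port.
	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Mount the Genkit flow handlers alongside it. An empty list is assumed
	// to mean "serve all defined flows", mirroring StartFlowServer.
	mux.Handle("/", genkit.NewFlowServeMux([]string{}))

	log.Fatal(http.ListenAndServe(":3400", mux))
}
```

This keeps flow endpoints and application endpoints behind a single listener, so one `ListenAndServe` call serves both.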
diff --git a/docs-go/models.md b/docs-go/models.md
index 1d70b6a8e..9af6a52b3 100644
--- a/docs-go/models.md
+++ b/docs-go/models.md
@@ -24,13 +24,7 @@ models.
 
 ```go
 projectID := os.Getenv("GCLOUD_PROJECT")
-err := vertexai.Init(context.Background(), vertexai.Config{
-	ProjectID: projectID,
-	Models: []string{
-		"gemini-1.5-pro",
-		"gemini-1.5-flash",
-	},
-})
+err := vertexai.Init(context.Background(), projectID, "us-central1")
 ```
 
 Note: Different plugins and models use different methods of
@@ -78,7 +72,7 @@ To just call the model:
 request := ai.GenerateRequest{Messages: []*ai.Message{
 	{Content: []*ai.Part{ai.NewTextPart("Tell me a joke.")}},
 }}
-response, err := ai.Generate(context.Background(), gemini15pro, &request, nil)
+response, err := gemini15pro.Generate(context.Background(), &request, nil)
 
 responseText, err := response.Text()
 fmt.Println(responseText)
@@ -114,9 +108,8 @@ Genkit supports chunked streaming of model responses:
 request := ai.GenerateRequest{Messages: []*ai.Message{
 	{Content: []*ai.Part{ai.NewTextPart("Tell a long story about robots and ninjas.")}},
 }}
-response, err := ai.Generate(
+response, err := gemini15pro.Generate(
 	context.Background(),
-	gemini15pro,
 	&request,
 	func(ctx context.Context, grc *ai.GenerateResponseChunk) error {
 		text, err := grc.Text()
@@ -147,7 +140,7 @@ If the model supports multimodal input, you can pass image prompts:
 		ai.NewMediaPart("", "data:image/jpeg;base64,"+encodedImage),
 	}},
 }}
-response, err := ai.Generate(context.Background(), gemini15pro, &request, nil)
+response, err := gemini15pro.Generate(context.Background(), &request, nil)
 ```
 
@@ -186,7 +179,7 @@ it.
 	},
 	Tools: []*ai.ToolDefinition{myJoke},
 }
-response, err := ai.Generate(context.Background(), gemini15pro, &request, nil)
+response, err := gemini15pro.Generate(context.Background(), &request, nil)
 ```
 
 This will automatically call the tools in order to fulfill the user prompt.
@@ -232,7 +225,7 @@ chatbots.
 }
 
 request := ai.GenerateRequest{Messages: history}
-response, err := ai.Generate(context.Background(), gemini15pro, &request, nil)
+response, err := gemini15pro.Generate(context.Background(), &request, nil)
 ```
 
 When you get a response, add it to the history:
@@ -252,7 +245,7 @@ chatbots.
 })
 
 request := ai.GenerateRequest{Messages: history}
-response, err := ai.Generate(context.Background(), gemini15pro, &request, nil)
+response, err := gemini15pro.Generate(context.Background(), &request, nil)
 ```
 
 If the model you're using supports the system role, you can use the initial
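To make the chat pattern concrete, here is a minimal sketch of a single turn, assuming `gemini15pro` is the model handle from the earlier snippets, that `ai.RoleSystem` and `ai.RoleUser` are the message role constants, and that the reply is reachable as `response.Candidates[0].Message` (an assumption about the response shape):

```go
history := []*ai.Message{
	{
		// Include a system message only if the model supports the system role.
		Role:    ai.RoleSystem,
		Content: []*ai.Part{ai.NewTextPart("You are a helpful assistant.")},
	},
	{
		Role:    ai.RoleUser,
		Content: []*ai.Part{ai.NewTextPart("Tell me a joke.")},
	},
}

request := ai.GenerateRequest{Messages: history}
response, err := gemini15pro.Generate(context.Background(), &request, nil)
if err != nil {
	log.Fatal(err)
}

// Append the model's reply so the next request carries the full conversation.
history = append(history, response.Candidates[0].Message)
```

Appending the returned message to `history` before the next `Generate` call is what gives the model the conversational context these sections describe.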