diff --git a/completion.go b/completion.go
index 015fa2a9f..0d0c1a8f4 100644
--- a/completion.go
+++ b/completion.go
@@ -37,6 +37,12 @@ const (
 	GPT4TurboPreview        = "gpt-4-turbo-preview"
 	GPT4VisionPreview       = "gpt-4-vision-preview"
 	GPT4                    = "gpt-4"
+	GPT4Dot1                = "gpt-4.1"
+	GPT4Dot120250414        = "gpt-4.1-2025-04-14"
+	GPT4Dot1Mini            = "gpt-4.1-mini"
+	GPT4Dot1Mini20250414    = "gpt-4.1-mini-2025-04-14"
+	GPT4Dot1Nano            = "gpt-4.1-nano"
+	GPT4Dot1Nano20250414    = "gpt-4.1-nano-2025-04-14"
 	GPT4Dot5Preview         = "gpt-4.5-preview"
 	GPT4Dot5Preview20250227 = "gpt-4.5-preview-2025-02-27"
 	GPT3Dot5Turbo0125       = "gpt-3.5-turbo-0125"
@@ -121,6 +127,13 @@ var disabledModelsForEndpoints = map[string]map[string]bool{
 		GPT432K:                 true,
 		GPT432K0314:             true,
 		GPT432K0613:             true,
+		O1:                      true,
+		GPT4Dot1:                true,
+		GPT4Dot120250414:        true,
+		GPT4Dot1Mini:            true,
+		GPT4Dot1Mini20250414:    true,
+		GPT4Dot1Nano:            true,
+		GPT4Dot1Nano20250414:    true,
 	},
 	chatCompletionsSuffix: {
 		CodexCodeDavinci002:     true,
diff --git a/completion_test.go b/completion_test.go
index 935bbe864..83bd899a1 100644
--- a/completion_test.go
+++ b/completion_test.go
@@ -181,3 +181,86 @@ func getCompletionBody(r *http.Request) (openai.CompletionRequest, error) {
 	}
 	return completion, nil
 }
+
+// TestCompletionWithO1Model tests that the O1 model is not supported for the completion endpoint.
+func TestCompletionWithO1Model(t *testing.T) {
+	config := openai.DefaultConfig("whatever")
+	config.BaseURL = "http://localhost/v1"
+	client := openai.NewClientWithConfig(config)
+
+	_, err := client.CreateCompletion(
+		context.Background(),
+		openai.CompletionRequest{
+			MaxTokens: 5,
+			Model:     openai.O1,
+		},
+	)
+	if !errors.Is(err, openai.ErrCompletionUnsupportedModel) {
+		t.Fatalf("CreateCompletion should return ErrCompletionUnsupportedModel for O1 model, but returned: %v", err)
+	}
+}
+
+// TestCompletionWithGPT4DotModels tests that the newer GPT-4.x models are not supported for the completion endpoint.
+func TestCompletionWithGPT4DotModels(t *testing.T) {
+	config := openai.DefaultConfig("whatever")
+	config.BaseURL = "http://localhost/v1"
+	client := openai.NewClientWithConfig(config)
+
+	models := []string{
+		openai.GPT4Dot1,
+		openai.GPT4Dot120250414,
+		openai.GPT4Dot1Mini,
+		openai.GPT4Dot1Mini20250414,
+		openai.GPT4Dot1Nano,
+		openai.GPT4Dot1Nano20250414,
+		openai.GPT4Dot5Preview,
+		openai.GPT4Dot5Preview20250227,
+	}
+
+	for _, model := range models {
+		t.Run(model, func(t *testing.T) {
+			_, err := client.CreateCompletion(
+				context.Background(),
+				openai.CompletionRequest{
+					MaxTokens: 5,
+					Model:     model,
+				},
+			)
+			if !errors.Is(err, openai.ErrCompletionUnsupportedModel) {
+				t.Fatalf("CreateCompletion should return ErrCompletionUnsupportedModel for %s model, but returned: %v", model, err)
+			}
+		})
+	}
+}
+
+// TestCompletionWithGPT4oModels tests that the GPT-4o models are not supported for the completion endpoint.
+func TestCompletionWithGPT4oModels(t *testing.T) {
+	config := openai.DefaultConfig("whatever")
+	config.BaseURL = "http://localhost/v1"
+	client := openai.NewClientWithConfig(config)
+
+	models := []string{
+		openai.GPT4o,
+		openai.GPT4o20240513,
+		openai.GPT4o20240806,
+		openai.GPT4o20241120,
+		openai.GPT4oLatest,
+		openai.GPT4oMini,
+		openai.GPT4oMini20240718,
+	}
+
+	for _, model := range models {
+		t.Run(model, func(t *testing.T) {
+			_, err := client.CreateCompletion(
+				context.Background(),
+				openai.CompletionRequest{
+					MaxTokens: 5,
+					Model:     model,
+				},
+			)
+			if !errors.Is(err, openai.ErrCompletionUnsupportedModel) {
+				t.Fatalf("CreateCompletion should return ErrCompletionUnsupportedModel for %s model, but returned: %v", model, err)
+			}
+		})
+	}
+}