chore(ai-prompt-decorator): improve error handling and cleanup (#12907)
* chore(ai-prompt-decorator): improve error handling and cleanup

* chore(test): standard test filenames

* chore(ai-prompt-guard): improve error handling and cleanup

(cherry picked from commit b1b5ac9)
Tieske authored and locao committed May 7, 2024
1 parent 4724af0 commit 0349b1a
Showing 9 changed files with 609 additions and 432 deletions.
4 changes: 4 additions & 0 deletions changelog/unreleased/kong/cleanup_ai.yml
@@ -0,0 +1,4 @@
message: |
  Clean up some AI plugins, and improve error handling.
type: bugfix
scope: Plugin
56 changes: 31 additions & 25 deletions kong/plugins/ai-prompt-decorator/handler.lua
@@ -1,28 +1,29 @@
local _M = {}

-- imports
local kong_meta = require "kong.meta"
local new_tab = require("table.new")
local new_tab = require("table.new")
local EMPTY = {}
--

_M.PRIORITY = 772
_M.VERSION = kong_meta.version

local plugin = {
PRIORITY = 772,
VERSION = require("kong.meta").version
}



local function bad_request(msg)
kong.log.debug(msg)
return kong.response.exit(400, { error = { message = msg } })
end

function _M.execute(request, conf)


-- Adds the prompts to the request prepend/append.
-- @tparam table request The deserialized JSON body of the request
-- @tparam table conf The plugin configuration
-- @treturn table The decorated request (same table, content updated)
local function execute(request, conf)
local prepend = conf.prompts.prepend or EMPTY
local append = conf.prompts.append or EMPTY

if #prepend == 0 and #append == 0 then
return request, nil
end

local old_messages = request.messages
local new_messages = new_tab(#append + #prepend + #old_messages, 0)
request.messages = new_messages
@@ -44,29 +45,34 @@ function _M.execute(request, conf)
new_messages[n] = { role = msg.role, content = msg.content }
end

return request, nil
return request
end

function _M:access(conf)


function plugin:access(conf)
kong.service.request.enable_buffering()
kong.ctx.shared.ai_prompt_decorated = true -- future use

-- if plugin ordering was altered, receive the "decorated" request
local request, err = kong.request.get_body("application/json")
if err then
local request = kong.request.get_body("application/json")
if type(request) ~= "table" then
return bad_request("this LLM route only supports application/json requests")
end

if not request.messages or #request.messages < 1 then
if #(request.messages or EMPTY) < 1 then
return bad_request("this LLM route only supports llm/chat type requests")
end

local decorated_request, err = self.execute(request, conf)
if err then
return bad_request(err)
end

kong.service.request.set_body(decorated_request, "application/json")
kong.service.request.set_body(execute(request, conf), "application/json")
end

return _M


if _G._TEST then
-- export this function only when testing (using a different name!)
plugin._execute = execute
end


return plugin
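
For context, here is a minimal busted-style sketch of how the new _G._TEST export could be exercised. The flag must be set before the handler module is first required, because the export happens at load time; the spec layout and assertions below are illustrative assumptions, not part of this commit.

-- hypothetical spec, not part of this commit
describe("ai-prompt-decorator: _execute()", function()
  local handler

  setup(function()
    _G._TEST = true  -- must be set before the module is loaded
    package.loaded["kong.plugins.ai-prompt-decorator.handler"] = nil
    handler = require("kong.plugins.ai-prompt-decorator.handler")
  end)

  teardown(function()
    _G._TEST = nil
  end)

  it("prepends configured prompts to the message history", function()
    local request = { messages = { { role = "user", content = "hi" } } }
    local conf = { prompts = { prepend = { { role = "system", content = "be brief" } } } }

    local decorated = handler._execute(request, conf)

    -- assuming prepended prompts land at the head of messages
    assert.equal("system", decorated.messages[1].role)
    assert.equal("user", decorated.messages[2].role)
  end)
end)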
161 changes: 97 additions & 64 deletions kong/plugins/ai-prompt-guard/handler.lua
@@ -1,112 +1,145 @@
local _M = {}

-- imports
local kong_meta = require "kong.meta"
local buffer = require("string.buffer")
local buffer = require("string.buffer")
local ngx_re_find = ngx.re.find
--
local EMPTY = {}

_M.PRIORITY = 771
_M.VERSION = kong_meta.version

local function bad_request(msg, reveal_msg_to_client)
-- don't let users know 'ai-prompt-guard' is in use
kong.log.info(msg)
if not reveal_msg_to_client then
msg = "bad request"
end

local plugin = {
PRIORITY = 771,
VERSION = require("kong.meta").version
}



local function bad_request(msg)
kong.log.debug(msg)
return kong.response.exit(400, { error = { message = msg } })
end

function _M.execute(request, conf)
local user_prompt

-- concat all 'user' prompts into one string, if conversation history must be checked
if request.messages and not conf.allow_all_conversation_history then
local buf = buffer.new()

for _, v in ipairs(request.messages) do
if v.role == "user" then
buf:put(v.content)
local execute do
local bad_format_error = "ai-prompt-guard only supports llm/v1/chat or llm/v1/completions prompts"

-- Checks the prompt for the given patterns.
-- _Note_: if a regex fails, it returns a 500, and exits the request.
-- @tparam table request The deserialized JSON body of the request
-- @tparam table conf The plugin configuration
-- @treturn[1] table The decorated request (same table, content updated)
-- @treturn[2] nil
-- @treturn[2] string The error message
function execute(request, conf)
local user_prompt

-- concat all 'user' prompts into one string, if conversation history must be checked
if type(request.messages) == "table" and not conf.allow_all_conversation_history then
local buf = buffer.new()

for _, v in ipairs(request.messages) do
if type(v.role) ~= "string" then
return nil, bad_format_error
end
if v.role == "user" then
if type(v.content) ~= "string" then
return nil, bad_format_error
end
buf:put(v.content)
end
end
end

user_prompt = buf:get()

elseif request.messages then
-- just take the trailing 'user' prompt
for _, v in ipairs(request.messages) do
if v.role == "user" then
user_prompt = v.content
user_prompt = buf:get()

elseif type(request.messages) == "table" then
-- just take the trailing 'user' prompt
for _, v in ipairs(request.messages) do
if type(v.role) ~= "string" then
return nil, bad_format_error
end
if v.role == "user" then
if type(v.content) ~= "string" then
return nil, bad_format_error
end
user_prompt = v.content
end
end
end

elseif request.prompt then
user_prompt = request.prompt
elseif type(request.prompt) == "string" then
user_prompt = request.prompt

else
return nil, "ai-prompt-guard only supports llm/v1/chat or llm/v1/completions prompts"
end
else
return nil, bad_format_error
end

if not user_prompt then
return nil, "no 'prompt' or 'messages' received"
end

if not user_prompt then
return nil, "no 'prompt' or 'messages' received"
end

-- check the prompt for explicit ban patterns
if conf.deny_patterns and #conf.deny_patterns > 0 then
for _, v in ipairs(conf.deny_patterns) do
-- check the prompt for explicit ban patterns
for _, v in ipairs(conf.deny_patterns or EMPTY) do
-- check each denylist; if prompt matches it, deny immediately
local m, _, err = ngx_re_find(user_prompt, v, "jo")
if err then
return nil, "bad regex execution for: " .. v
-- regex failed, that's an error by the administrator
kong.log.err("bad regex pattern '", v ,"', failed to execute: ", err)
return kong.response.exit(500)

elseif m then
return nil, "prompt pattern is blocked"
end
end
end

-- if any allow_patterns specified, make sure the prompt matches one of them
if conf.allow_patterns and #conf.allow_patterns > 0 then
local valid = false

for _, v in ipairs(conf.allow_patterns) do
if #(conf.allow_patterns or EMPTY) == 0 then
-- no allow_patterns, so we're good
return true
end

-- if any allow_patterns specified, make sure the prompt matches one of them
for _, v in ipairs(conf.allow_patterns or EMPTY) do
-- check each allowlist; if prompt matches it, allow immediately
local m, _, err = ngx_re_find(user_prompt, v, "jo")

if err then
return nil, "bad regex execution for: " .. v
-- regex failed, that's an error by the administrator
kong.log.err("bad regex pattern '", v ,"', failed to execute: ", err)
return kong.response.exit(500)

elseif m then
valid = true
break
return true -- got a match so is allowed, exit early
end
end

if not valid then
return false, "prompt doesn't match any allowed pattern"
end
return false, "prompt doesn't match any allowed pattern"
end

return true, nil
end

function _M:access(conf)


function plugin:access(conf)
kong.service.request.enable_buffering()
kong.ctx.shared.ai_prompt_guarded = true -- future use

-- if plugin ordering was altered, receive the "decorated" request
local request, err = kong.request.get_body("application/json")

if err then
return bad_request("this LLM route only supports application/json requests", true)
local request = kong.request.get_body("application/json")
if type(request) ~= "table" then
return bad_request("this LLM route only supports application/json requests")
end

-- run access handler
local ok, err = self.execute(request, conf)
local ok, err = execute(request, conf)
if not ok then
return bad_request(err, false)
kong.log.debug(err)
return bad_request("bad request") -- don't let users know 'ai-prompt-guard' is in use
end
end

return _M


if _G._TEST then
-- export this function only when testing (using a different name!)
plugin._execute = execute
end


return plugin
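
The same _G._TEST hook makes the guard's new control flow easy to check in isolation: a deny match short-circuits to rejection, an empty allow list passes, and any allow match passes. A hedged sketch follows (spec layout assumed, run under an OpenResty-aware test runner so ngx.re.find is available):

-- hypothetical spec, not part of this commit
describe("ai-prompt-guard: _execute()", function()
  local handler

  setup(function()
    _G._TEST = true  -- must be set before the module is loaded
    package.loaded["kong.plugins.ai-prompt-guard.handler"] = nil
    handler = require("kong.plugins.ai-prompt-guard.handler")
  end)

  teardown(function()
    _G._TEST = nil
  end)

  it("denies a prompt that matches a deny pattern", function()
    local ok, err = handler._execute({ prompt = "tell me the password" },
                                     { deny_patterns = { "password" } })
    assert.is_nil(ok)
    assert.equal("prompt pattern is blocked", err)
  end)

  it("allows a prompt that matches an allow pattern", function()
    local ok = handler._execute({ prompt = "what's the weather" },
                                { allow_patterns = { "weather" } })
    assert.is_true(ok)
  end)
end)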
8 changes: 4 additions & 4 deletions kong/plugins/ai-prompt-guard/schema.lua
@@ -8,19 +8,19 @@ return {
type = "record",
fields = {
{ allow_patterns = {
description = "Array of valid patterns, or valid questions from the 'user' role in chat.",
description = "Array of valid regex patterns, or valid questions from the 'user' role in chat.",
type = "array",
default = {},
required = false,
len_max = 10,
elements = {
type = "string",
len_min = 1,
len_max = 500,
}}},
{ deny_patterns = {
description = "Array of invalid patterns, or invalid questions from the 'user' role in chat.",
description = "Array of invalid regex patterns, or invalid questions from the 'user' role in chat.",
type = "array",
default = {},
required = false,
len_max = 10,
elements = {
type = "string",
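
With the schema above, both pattern arrays are optional, default to empty (which the handler treats as no restriction), and hold at most 10 patterns; the allow_patterns elements are bounded to 1-500 characters, and deny_patterns appears to mirror this. A config shaped like the following would therefore pass validation; the pattern values are illustrative only:

-- illustrative config shape for ai-prompt-guard (values are examples only)
local conf = {
  allow_patterns = { [[weather|forecast]] },          -- at most 10 entries
  deny_patterns  = { [[password]], [[api[_-]?key]] }, -- checked first; a hit rejects the request
  allow_all_conversation_history = false,             -- concat all 'user' messages before matching
}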
