diff --git a/api/apps/api_app.py b/api/apps/api_app.py
index eb9bd6c520e..c291a4ab173 100644
--- a/api/apps/api_app.py
+++ b/api/apps/api_app.py
@@ -87,7 +87,8 @@ def token_list():
         if not tenants:
             return get_data_error_result(retmsg="Tenant not found!")
 
-        objs = APITokenService.query(tenant_id=tenants[0].tenant_id, dialog_id=request.args["dialog_id"])
+        dialog_id = request.args.get("dialog_id") or request.args["canvas_id"]
+        objs = APITokenService.query(tenant_id=tenants[0].tenant_id, dialog_id=dialog_id)
         return get_json_result(data=[o.to_dict() for o in objs])
     except Exception as e:
         return server_error_response(e)
@@ -123,7 +124,8 @@ def stats():
                     days=7)).strftime("%Y-%m-%d 24:00:00")),
             request.args.get(
                 "to_date",
-                datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
+                datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
+            "agent" if request.args.get("canvas_id") else None)
     res = {
         "pv": [(o["dt"], o["pv"]) for o in objs],
         "uv": [(o["dt"], o["uv"]) for o in objs],
diff --git a/api/db/services/api_service.py b/api/db/services/api_service.py
index 6b2eccf3721..a99f45aea24 100644
--- a/api/db/services/api_service.py
+++ b/api/db/services/api_service.py
@@ -45,7 +45,7 @@ def append_message(cls, id, conversation):
 
     @classmethod
     @DB.connection_context()
-    def stats(cls, tenant_id, from_date, to_date):
+    def stats(cls, tenant_id, from_date, to_date, source=None):
         return cls.model.select(
             cls.model.create_date.truncate("day").alias("dt"),
             peewee.fn.COUNT(
@@ -62,5 +62,6 @@ def stats(cls, tenant_id, from_date, to_date):
                 cls.model.thumb_up).alias("thumb_up")
         ).join(Dialog, on=(cls.model.dialog_id == Dialog.id & Dialog.tenant_id == tenant_id)).where(
             cls.model.create_date >= from_date,
-            cls.model.create_date <= to_date
+            cls.model.create_date <= to_date,
+            cls.model.source == source
         ).group_by(cls.model.create_date.truncate("day")).dicts()
diff --git a/rag/utils/__init__.py b/rag/utils/__init__.py
index e6b7552bcd7..63bb582a706 100644
--- a/rag/utils/__init__.py
+++ b/rag/utils/__init__.py
@@ -74,10 +74,15 @@ def findMaxTm(fnm):
 
 encoder = tiktoken.encoding_for_model("gpt-3.5-turbo")
 
+
 def num_tokens_from_string(string: str) -> int:
     """Returns the number of tokens in a text string."""
-    num_tokens = len(encoder.encode(string))
-    return num_tokens
+    try:
+        return len(encoder.encode(string))
+    except Exception:
+        # tiktoken can fail on unusual input; fail soft and report zero tokens
+        pass
+    return 0
 
 
 def truncate(string: str, max_len: int) -> int: