From 9bcd4e060b9c53cf32f20bfc9217a81e76b2b13d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E5=8F=8B=E6=98=89?=
Date: Sat, 16 Mar 2024 13:12:15 +0800
Subject: [PATCH 1/2] QA clean

---
 scripts/qa_generation/Clean_QA.md           |  11 ++
 scripts/qa_generation/QA_clean.py           | 105 ++++++++++++++++++
 scripts/qa_generation/README.md             | 102 ++++------------
 scripts/qa_generation/README_EN.md          | 102 ++++------------
 scripts/qa_generation/choose_prompt.md      |  11 ++
 scripts/qa_generation/config/config.py      |  10 +-
 scripts/qa_generation/main.py               |   9 +-
 scripts/qa_generation/model/qwen.py         |  32 +++++-
 scripts/qa_generation/util/data_loader.py   |  45 +++++++-
 scripts/qa_generation/util/prompt_loader.py |   7 ++
 10 files changed, 264 insertions(+), 170 deletions(-)
 create mode 100644 scripts/qa_generation/Clean_QA.md
 create mode 100644 scripts/qa_generation/QA_clean.py
 create mode 100644 scripts/qa_generation/choose_prompt.md

diff --git a/scripts/qa_generation/Clean_QA.md b/scripts/qa_generation/Clean_QA.md
new file mode 100644
index 0000000..9e0b6ec
--- /dev/null
+++ b/scripts/qa_generation/Clean_QA.md
@@ -0,0 +1,11 @@
+# 清洗 QA 对
+调用qwen去判断当前QA对是否属于心理学范畴,去除非心理学范畴的 QA 对
+
+## Step 1
+1. 准备好需要清洗的 QA 对数据
+2. 将该数据放进 model 同级 data 文件夹下
+3. 根据文件夹名去修改 config/config.py 中的 judge_dir。我个人没有对文件名进行更改,所以我的judge_dir是 judge_dir = os.path.join(data_dir, '数据整合')
+
+## Step 2
+1. 运行QA_clean.py即可
+2. 清洗完的 QA 对会以 jsonl 的格式存在 data/cleaned 下
\ No newline at end of file
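Before the script itself, a minimal sequential sketch of the flow `QA_clean.py` below implements concurrently: read each jsonl line, ask the model whether the QA pair belongs to psychology, and keep only the lines judged `'1'`. The `judge` callable stands in for the qwen call; treating each line as one JSON-encoded QA object is an assumption for illustration.

```python
import json

def clean_file(src_path: str, dst_path: str, judge) -> None:
    """Sequential sketch of the judge-and-keep loop in QA_clean.py."""
    kept = []
    with open(src_path, 'r', encoding='utf-8') as f:
        lines = [line for line in f.read().strip().split('\n') if line]
    for line in lines:
        # judge() stands in for call_qwen_Psychology_QA_Pairs: it should
        # return '1' if the QA pair is psychology-related, '0' otherwise.
        if judge(line) == '1':
            kept.append(json.loads(line))
    with open(dst_path, 'a', encoding='utf-8') as f:
        for item in kept:
            f.write(json.dumps(item, ensure_ascii=False) + '\n')
```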
diff --git a/scripts/qa_generation/QA_clean.py b/scripts/qa_generation/QA_clean.py
new file mode 100644
index 0000000..7d3fbc7
--- /dev/null
+++ b/scripts/qa_generation/QA_clean.py
@@ -0,0 +1,105 @@
+import os
+import json
+import time
+from tqdm import tqdm
+import concurrent.futures
+from datetime import datetime
+import numpy as np
+
+from config.config import result_dir, clean_dir, storage_interval, window_size, overlap_size, multi_process_num
+from model.qwen import call_qwen_single_turn, call_qwen_Psychology_QA_Pairs
+from util.logger import get_logger
+from util.data_loader import get_jsonl_file_paths, get_file_list, get_QA_pairs, get_txt_content, capture_qa, merge_sub_qa_generation, save_to_file
+
+logger = get_logger()
+
+
+def single_thread_generate(thread_num, interval, model_caller, storage_jsonl_path, contents):
+
+    storage_counter = 0
+    judge_list = []
+    for content in tqdm(contents):
+        try:
+            response = model_caller(content)
+
+            if response == '1':
+                content = json.loads(content)
+                judge_list.append(content)
+                storage_counter += 1
+            else:
+                continue
+
+            if storage_counter % interval == 0:
+                save_to_file(storage_jsonl_path, judge_list)
+                storage_counter = 0
+                judge_list = []
+
+        except Exception as exc:
+            logger.error("QA cleaning error : %s" % (exc))
+
+    # 最后,如果 judge_list 中还有剩余内容,也会将其保存到文件中。
+    if judge_list:
+        save_to_file(storage_jsonl_path, judge_list)
+        judge_list = []
+
+
+"""
+清洗 QA 对:调用模型判断 QA 对是否属于心理学范畴,仅保留属于心理学范畴的 QA 对
+model_name: 可调用的模型名称,暂时只实现了 qwen
+interval: 存储间隔,即每隔多少条存一次文件,过密的间隔会增大 IO 开销
+"""
+def clean_qa(
+    model_name: str = 'qwen',
+    interval: int = 10,
+):
+    # current_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+
+    if model_name == 'qwen':
+        model_caller = call_qwen_Psychology_QA_Pairs
+    else:
+        logger.warning('This model is currently not supported and will call the default model - qwen.')
+        model_caller = call_qwen_Psychology_QA_Pairs
+        model_name = 'qwen'
+
+    logger.info(f'The called model is: {model_name}.')
+    logger.info(f'The storage interval is: {interval}.')
+
+    file_lists = get_jsonl_file_paths()  # 数据整合文件夹下所有.jsonl文件的地址
+
+    for file_path in file_lists:
+        # 一个jsonl文件的所有QA Pairs
+        contents = get_QA_pairs(file_path)
+
+        file_name = os.path.basename(file_path)
+        print(file_name)
+        storage_jsonl_path = os.path.join(
+            clean_dir, f'{file_name}')
+
+        logger.info(f'The cleaned QA will be stored in {storage_jsonl_path}.')
+
+        contents_array = np.array(contents)
+        chunks = np.array_split(contents_array, multi_process_num)
+
+        # 构建并发参数 list
+        parameters_list = list()
+        for thread_num, chunk in enumerate(chunks):
+            parameters_list.append(
+                [thread_num, interval, model_caller, storage_jsonl_path, list(chunk)]
+            )
+
+        with concurrent.futures.ThreadPoolExecutor(max_workers=multi_process_num) as executor:
+            futures = [executor.submit(single_thread_generate, *parameters) for parameters in parameters_list]
+
+            for future in concurrent.futures.as_completed(futures):
+                try:
+                    future.result()
+                except Exception as exc:
+                    logger.error("Thread generated an exception: %s" % (exc))
+
+        merge_sub_qa_generation(result_dir, storage_jsonl_path)
+
+
+if __name__ == '__main__':
+    # 创建cleaned文件夹
+    os.makedirs('./data/cleaned', exist_ok=True)
+    clean_qa(interval=storage_interval)
diff --git a/scripts/qa_generation/README.md b/scripts/qa_generation/README.md
index 874427a..d3646c9 100644
--- 
a/scripts/qa_generation/README.md +++ b/scripts/qa_generation/README.md @@ -1,95 +1,37 @@ -# RAG数据库构建流程 +# QA Generation Pipeline -## **构建目的** +## 1. 使用方法 -利用心理学专业的书籍构建QA知识对,为RAG提供心理咨询知识库,使我们的EmoLLM的回答更加专业可靠。为了实现这个目标我们利用几十本心理学书籍来构建这个RAG知识库。主要的构建流程如下: +1. 检查 `requirements.txt` 中的依赖是否满足。 +2. 调整代码中 `system_prompt`,确保与repo最新版本一致,保证生成QA的多样性和稳定性。 +3. 将txt文件放到与 `model`同级目录 `data`文件夹中. +4. 在 `config/config.py` 配置所需的 API KEY,从 `main.py` 启动即可。生成的 QA 对会以 jsonl 的格式存在 `data/generated` 下。 -## **构建流程** +### 1.1 API KEY 获取方法 -## **步骤一:PDF to TXT** +目前仅包含了 qwen。 -- 目的 - - 将收集到的PDF版本的心理学书籍转化为TXT文本文件,方便后续的信息提取。 +#### 1.1.1 Qwen -- 所需工具 +前往[模型服务灵积-API-KEY管理 (aliyun.com)](https://dashscope.console.aliyun.com/apiKey),点击”创建新的 API-KEY“,将获取的 API KEY 填至 `config/config.py` 中的 `DASHSCOPE_API_KEY` 即可。 - - [pdf2txt](https://github.com/SmartFlowAI/EmoLLM/blob/main/scripts/pdf2txt.py) +## 2. 注意事项 - - [PaddleORC处理PDF用法参考](https://github.com/SmartFlowAI/EmoLLM/blob/main/generate_data/OCR.md) - - - 安装必要的python库 - - ```python - pip install paddlepaddle - pip install opencv-python - pip install paddleocr - ``` +### 2.1 系统提示 System Prompt -- 注意 - - 如果无法使用**pip install paddleocr**安装paddleocr,可以考虑采用whl文件安装,[下载地址](https://pypi.org/project/paddleocr/#files) - - 脚本启动方式采用命令行启动:python pdf2txt.py [PDF存放的文件名] +注意,目前的解析方案是基于模型会生成 markdown 包裹的 json 块的前提的,更改 system prompt 时需要保证这一点不变。 -## **步骤二:筛选PDF** +### 2.2 滑动窗口 Sliding Window -- 筛选目的 +滑动窗口的 `window_size` 和 `overlap_size` 都可以在 `util/data_loader.py` 中的 `get_txt_content` 函数中更改。目前是按照句子分割的滑动窗口。 - - 利用LLM去除非专业心理学书籍 +### 2.3 书本文件格式 Corpus Format -- 筛选标准,包含心理咨询相关内容,如: +目前仅支持了 txt 格式,可以将清洗好的书籍文本放在 `data` 文件夹下,程序会递归检索该文件夹下的所有 txt 文件。 - - 心理咨询流派 - 具体咨询方法 - - 心理疾病 - 疾病特征 - - 心理疾病 - 治疗方法 +## TODO -- 筛选方式: - - - 根据标题初筛 - - - 若无法判断属于心理咨询相关书籍,利用kimi/GLM-4查询是否包含心理咨询相关知识(建议一次仅查询一本书) - - - ```markdown - 参考prompt: - 你是一位经验丰富的心理学教授,熟悉心理学知识和心理咨询。我需要你协助我完成"识别书籍是否包含心理咨询知识"任务,请深呼吸并一步步思考,给出你的答案。如果你的答案让我满意,我将给你10w小费! - 具体任务如下: - 判断该书籍中是否包含以下心理咨询相关知识: - ''' - 心理咨询流派 - 具体咨询方法 - 心理疾病 - 疾病特征 - 心理疾病 - 治疗方法 - ''' - 请深呼吸并一步步查看该书籍,认真完成任务。 - ``` - - -## **步骤三:提取QA对** - -- 根据书籍内容,利用LLM高效构造QA知识对 -- 提取流程 - - - 准备处理好的txt文本数据 - - 按要求配置[脚本文件](https://github.com/SmartFlowAI/EmoLLM/tree/main/scripts/qa_generation) - - 根据自己的需求或者提取的结果合理修改window_size和overlap_size - -- 使用方法 - - 检查 `requirements.txt` 中的依赖是否满足。 - - 调整代码中 `system_prompt`,确保与repo最新版本一致,保证生成QA的多样性和稳定性。 - - 将txt文件放到与 `model`同级目录 `data`文件夹中. - - 在 `config/config.py` 配置所需的 API KEY,从 `main.py` 启动即可。生成的 QA 对会以 jsonl 的格式存在 `data/generated` 下。 - -- API KEY 获取方法 - - 目前仅包含了 qwen。 - - Qwen - - 前往[模型服务灵积-API-KEY管理 (aliyun.com)](https://dashscope.console.aliyun.com/apiKey),点击”创建新的 API-KEY“,将获取的 API KEY 填至 `config/config.py` 中的 `DASHSCOPE_API_KEY` 即可。 - -- 注意事项 - - 系统提示 System Prompt - - 注意,目前的解析方案是基于模型会生成 markdown 包裹的 json 块的前提的,更改 system prompt 时需要保证这一点不变。 - - 滑动窗口 Sliding Window - - 滑动窗口的 `window_size` 和 `overlap_size` 都可以在 `util/data_loader.py` 中的 `get_txt_content` 函数中更改。目前是按照句子分割的滑动窗口。 - -- 书本文件格式 Corpus Format - - 目前仅支持了 txt 格式,可以将清洗好的书籍文本放在 `data` 文件夹下,程序会递归检索该文件夹下的所有 txt 文件。 - -## **步骤四:清洗QA对** - -- 清洗目的 +1. 支持更多模型(Gemini、GPT、ChatGLM……) +2. 支持多线程调用模型 +3. 支持更多文本格式(PDF……) +4. 支持更多切分文本的方式 diff --git a/scripts/qa_generation/README_EN.md b/scripts/qa_generation/README_EN.md index b2768df..0c76750 100644 --- a/scripts/qa_generation/README_EN.md +++ b/scripts/qa_generation/README_EN.md @@ -1,95 +1,37 @@ -# RAG Database Building Process +# QA Generation Pipeline -## **Constructive purpose** +## 1. 
Usage
 
-Using books specialized in psychology to build QA knowledge pairs for RAG to provide a counseling knowledge base to make our EmoLLM answers more professional and reliable. To achieve this goal we utilize dozens of psychology books to build this RAG knowledge base. The main building process is as follows:
+1. Check whether the dependencies in `requirements.txt` are satisfied.
+2. Adjust the `system_prompt` in the code so that it is consistent with the latest version of the repo, which keeps the generated QA diverse and stable.
+3. Put the txt files into the `data` folder in the same directory as `model`.
+4. Configure the required API KEY in `config/config.py` and start from `main.py`. The generated QA pairs are stored in jsonl format under `data/generated`.
 
-## **Build process**
+### 1.1 How to obtain an API KEY
 
-## **Step 1: PDF to TXT**
+Currently only qwen is included.
 
-- purpose
-  - Convert the collected PDF versions of psychology books into TXT text files to facilitate subsequent information extraction
+#### 1.1.1 Qwen
 
-- Tools required
+Go to [DashScope API-KEY management (aliyun.com)](https://dashscope.console.aliyun.com/apiKey), click "Create new API-KEY", and fill the obtained API KEY into `DASHSCOPE_API_KEY` in `config/config.py`.
 
-  - [pdf2txt](https://github.com/SmartFlowAI/EmoLLM/blob/main/scripts/pdf2txt.py)
+## 2. Precautions
 
-  - [PaddleORC Processing PDF Usage Reference](https://github.com/SmartFlowAI/EmoLLM/blob/main/generate_data/OCR.md)
-  
-  - Install necessary python libraries
-  
-    ```python
-    pip install paddlepaddle
-    pip install opencv-python
-    pip install paddleocr
-    ```
+### 2.1 System Prompt
 
-- precautionary
-  - If you are unable to install paddleocr using **pip install paddleocr**, consider using the whl file installation, [download address](https://pypi.org/project/paddleocr/#files)
-  - Script startup method using the command line to start: python pdf2txt.py [PDF file name stored in the]
+Note that the current parsing scheme is based on the premise that the model generates markdown-wrapped json blocks, and you need to make sure that this remains the case when you change the system prompt.
 
-## **Step 2: Screening PDF**
+### 2.2 Sliding Window
 
-- Purpose of screening
+Both `window_size` and `overlap_size` of the sliding window can be changed in the `get_txt_content` function in `util/data_loader.py`. Currently it is a sliding window split by sentence.
 
-  - Using the LLM to go to non-professional psychology books
+### 2.3 Corpus Format
 
-- Screening criteria that include counseling related content such as:
+At present only txt format is supported; the cleaned book text can be placed under the `data` folder, and the program will recursively retrieve all txt files under that folder.
 
-  - Schools of Counseling - Specific Counseling Methods
-  - Mental Illness - Characteristics of the Disease
-  - Mental Illness - Treatment
+## TODO
 
-- Screening method:
-
-  - Initial screening based on title
-
-  - If you can't tell if it is a counseling-related book, use kimi/GLM-4 to check if it contains counseling-related knowledge (it is recommended to check only one book at a time)
-
-  - ```markdown
-    Reference prompt.
-    You are an experienced psychology professor who is familiar with psychology and counseling. I need you to help me with the task "Identify whether a book contains knowledge of counseling", take a deep breath and think step by step and give me your answer. If your answer satisfies me, I will give you a 10w tip!
-    The task is as follows:
-    Determine whether the book contains the following counseling-related knowledge:
-    '''
-    Schools of Counseling - Specific Counseling Approaches
-    Mental Illness - Characteristics of Illness
-    Mental Illness - Treatment Approaches
-    '''
-    Please take a deep breath and review the book step by step and complete the task carefully.
-    ```
-
-
-## **Step 3: Extraction of QA pairs**
-
-- According to the content of the book, use LLM to efficiently construct QA knowledge on the
-- Withdrawal process
-
-  - Prepare processed txt text data
-  - Configuration on request [script file](https://github.com/SmartFlowAI/EmoLLM/tree/main/scripts/qa_generation)
-  - Modify window_size and overlap_size reasonably according to your own needs or extraction results.
-
-- Usage
-  - Checks if the dependencies in `requirements.txt` are satisfied.
-  - Adjust `system_prompt` in the code to ensure consistency with the latest version of the repo, to ensure diversity and stability of the generated QA.
-  - Place the txt file in the `data` folder in the same directory as the `model`.
-  - Configure the required API KEYs in `config/config.py` and start from `main.py`. The generated QA pairs are stored in jsonl format under `data/generated`.
-
-- API KEY Getting Methods
-  - Currently only qwen is included.
-  - Qwen
-    - Go to [Model Service LingJi - API-KEY Management (aliyun.com)](https://dashscope.console.aliyun.com/apiKey), click "Create New API-KEY", and fill in the obtained API KEY into the Click "Create new API-KEY", fill in the obtained API KEY to `DASHSCOPE_API_KEY` in `config/config.py`.
-
-- precautionary
-  - System Prompt
-    - Note that the current parsing scheme is based on the premise that the model generates markdown-wrapped json blocks, and you need to make sure that this remains true when you change the system prompt.
-  - Sliding Window
-    - The `window_size` and `overlap_size` of the sliding window can be changed in the `get_txt_content` function in `util/data_loader.py`. Currently the sliding window is split by sentence.
-
-- Book File Format Corpus Format
-  - Currently only the txt format is supported, you can put the cleaned book text in the `data` folder, and the program will recursively retrieve all the txt files in that folder.
-
-## **Step 4: Cleaning of QA pairs**
-
-- Purpose of cleaning
+1. Support more models (Gemini, GPT, ChatGLM...)
+2. Support multi-threaded model calls
+3. Support more text formats (PDF...)
+4. Support more ways to split text
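The Qwen setup described in 1.1.1 comes down to a single dashscope call; `model/qwen.py` later in this patch uses exactly this pattern with `qwen-max-1201` and `result_format='message'`. A minimal sketch (the `Role` import path follows dashscope's published examples and is an assumption here):

```python
from http import HTTPStatus

import dashscope
from dashscope import Generation
from dashscope.api_entities.dashscope_response import Role

dashscope.api_key = 'DASHSCOPE_API_KEY from config/config.py'

messages = [
    {'role': Role.SYSTEM, 'content': 'contents of system_prompt_v2.md'},
    {'role': Role.USER, 'content': 'one sliding-window chunk of book text'},
]
response = Generation.call(
    model='qwen-max-1201',
    messages=messages,
    result_format='message',   # reply arrives as a chat message
    stream=False,
    incremental_output=False,
)
if response.status_code == HTTPStatus.OK:
    print(response.output.choices[0]['message']['content'])
else:
    print(response.code, response.message)
```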
diff --git a/scripts/qa_generation/choose_prompt.md b/scripts/qa_generation/choose_prompt.md
new file mode 100644
index 0000000..6f684be
--- /dev/null
+++ b/scripts/qa_generation/choose_prompt.md
@@ -0,0 +1,11 @@
+你是一名经验丰富的心理咨询师,熟悉心理学相关知识。我将为我的来访者解决心理问题,需要一定的心理学知识支持。请你根据我提供的 QA 对,判断其是否属于心理学范畴。请深呼吸并一步一步思考,给出你最正确的判断!
+
+- 心理学范畴:"心理学知识,心理咨询方法, 心理疾病特征, 心理疾病治疗方法"等主题。要求是适合对话心理咨询的知识,去掉作者、时间、背景故事等无关内容.
+
+- 判断标准如下:
+
+1.若当前 QA 对属于心理学范畴,则返回 "1".
+
+2.若当前 QA 对不属于心理学范畴,则返回 "0".
+ +以下是给定的心理学 QA 对内容: diff --git a/scripts/qa_generation/config/config.py b/scripts/qa_generation/config/config.py index 45bf635..341d2fd 100644 --- a/scripts/qa_generation/config/config.py +++ b/scripts/qa_generation/config/config.py @@ -10,7 +10,9 @@ model_dir = os.path.join(base_dir, 'model') # model # data -data_dir = os.path.join(base_dir, 'data') # data +data_dir = os.path.join(base_dir, 'data') # /Users/wangyoufang/Downloads/EmoLLM/scripts/qa_generation/data +clean_dir = os.path.join(data_dir, 'cleaned') +judge_dir = os.path.join(data_dir, '数据整合') result_dir = os.path.join(data_dir, 'generated') # result # log @@ -18,7 +20,9 @@ log_file_path = os.path.join(log_dir, 'log.log') # file # system prompt +# Prompt内容 system_prompt_file_path = os.path.join(base_dir, 'system_prompt_v2.md') # system prompt +wash_prompt_file_path = os.path.join(base_dir, 'choose_prompt.md') """ @@ -28,11 +32,11 @@ DASHSCOPE_API_KEY = '' - """ 控制参数 """ storage_interval = 10 window_size = 8 overlap_size = 2 -multi_process_num = 3 +multi_process_num = 1 + diff --git a/scripts/qa_generation/main.py b/scripts/qa_generation/main.py index d84187f..724d222 100644 --- a/scripts/qa_generation/main.py +++ b/scripts/qa_generation/main.py @@ -24,6 +24,7 @@ def single_thread_generate(thread_num, interval, model_caller, storage_jsonl_pat for content in tqdm(contents): try: response = model_caller(content) + captured_qa = capture_qa(response) if captured_qa is None: @@ -70,6 +71,7 @@ def generate_qa( storage_list = [] for file_path in file_list: contents = get_txt_content(file_path, window_size=window_size, overlap_size=overlap_size) + storage_list = [] _, file_name = os.path.split(file_path) @@ -77,7 +79,7 @@ def generate_qa( result_dir, f'{current_time}-{file_name}-{model_name}.jsonl') logger.info(f'The generated QA will be stored in {storage_jsonl_path}.') - # 基于并发个数切分 contents 内容 + contents_array = np.array(contents) chunks = np.array_split(contents_array, multi_process_num) @@ -89,8 +91,9 @@ def generate_qa( ) # 并发生成 QA 对 + # 使用 ThreadPoolExecutor 创建一个线程池,其中 max_workers=multi_process_num 指定了线程池中最大的线程数。 with concurrent.futures.ThreadPoolExecutor(max_workers=multi_process_num) as executor: - # 创建一个Future列表,它们将对应每个worker_function的结果 + # 循环调用 single_thread_generate 函数,每次赋予参数 parameters futures = [executor.submit(single_thread_generate, *parameters) for parameters in parameters_list] for future in concurrent.futures.as_completed(futures): @@ -99,8 +102,10 @@ def generate_qa( except Exception as exc: logger.error("Thread generated an exception: %s" % (exc)) + # 最后调用 merge_sub_qa_generation 函数,将各个子任务生成的 QA 对合并到一个文件中。汇总整个处理过程的结果。 merge_sub_qa_generation(result_dir, storage_jsonl_path) + if __name__ == '__main__': # 创建generated文件夹 diff --git a/scripts/qa_generation/model/qwen.py b/scripts/qa_generation/model/qwen.py index ed27c4a..e221ff5 100644 --- a/scripts/qa_generation/model/qwen.py +++ b/scripts/qa_generation/model/qwen.py @@ -5,7 +5,7 @@ from config.config import DASHSCOPE_API_KEY from util.logger import get_logger -from util.prompt_loader import load_system_prompt +from util.prompt_loader import load_system_prompt, load_wash_prompt dashscope.api_key = DASHSCOPE_API_KEY @@ -17,7 +17,35 @@ def call_qwen_single_turn(query: str) -> str: messages = [ { 'role': Role.SYSTEM, - 'content': load_system_prompt() + 'content': load_system_prompt() # 读取Prompt内容(system_prompt_vx_xx.md) + }, + { + 'role': Role.USER, + 'content': query + } + ] + response = Generation.call( + model='qwen-max-1201', + messages=messages, + 
result_format='message', + stream=False, + incremental_output=False + ) + if response.status_code == HTTPStatus.OK: + return response.output.choices[0]['message']['content'] + else: + logger.error('Request id: %s, Status code: %s, error code: %s, error message: %s' % ( + response.request_id, response.status_code, + response.code, response.message + )) + return "" + + +def call_qwen_Psychology_QA_Pairs(query: str) -> str: + messages = [ + { + 'role': Role.SYSTEM, + 'content': load_wash_prompt() }, { 'role': Role.USER, diff --git a/scripts/qa_generation/util/data_loader.py b/scripts/qa_generation/util/data_loader.py index fdfbfa9..875688f 100644 --- a/scripts/qa_generation/util/data_loader.py +++ b/scripts/qa_generation/util/data_loader.py @@ -4,11 +4,41 @@ import glob from typing import List, Dict -from config.config import data_dir +from config.config import data_dir, judge_dir from util.logger import get_logger logger = get_logger() + +""" +递归获取 数据整合 下的所有 .jsonl 文件列表 +""" +def get_jsonl_file_paths() -> List[str]: + json_file_paths = [] + + # 遍历根目录及其所有子目录 + for dirpath, dirnames, filenames in os.walk(judge_dir): + # 对每个文件进行检查 + for filename in filenames: + # 使用正则表达式匹配以.jsonl结尾的文件名 + if re.search(r'\.jsonl$', filename): + # 构建完整的文件路径并添加到列表中 + json_file_path = os.path.join(dirpath, filename) + json_file_paths.append(json_file_path) + + return json_file_paths + + +def get_QA_pairs(json_path): + with open(json_path, 'r', encoding='utf-8') as f: + content = f.read().strip() + + # 按照换行符分割字符串 + QA_Pairs = content.split('\n') + + return QA_Pairs + + """ 递归获取 data_dir 下的所有 .txt 文件列表 """ @@ -25,11 +55,14 @@ def get_file_list() -> List[str]: logger.warning(f'No txt text found in {data_dir}, please check!') return txt_files + """ 获取 txt 文本的所有内容,按句子返回 List file_path: txt 文本路径 window_size: 滑窗大小,单位为句子数 overlap_size: 重叠大小,单位为句子数 + +处理txt内容并返回一组一组的句子,每组window_size个,相邻两组的重叠句子数是overlap_size """ def get_txt_content( file_path: str, @@ -47,7 +80,7 @@ def get_txt_content( res = [] sentences_amount = len(sentences) start_index, end_index = 0, sentences_amount - window_size - ## check length + # check length if window_size < overlap_size: logger.error("window_size must be greater than or equal to overlap_size") return None @@ -56,7 +89,7 @@ def get_txt_content( return ['\n'.join(sentences)] for i in range(start_index, end_index + 1, overlap_size): - res.append('\n'.join(sentences[i : i + window_size])) + res.append('\n'.join(sentences[i: i + window_size])) return res @@ -80,6 +113,7 @@ def capture_qa(content: str) -> List[Dict]: logger.warning("No JSON block found.") return None + """ 将 storage_list 存入到 storage_jsonl_path """ @@ -88,6 +122,7 @@ def save_to_file(storage_jsonl_path, storage_list): for item in storage_list: f.write(json.dumps(item, ensure_ascii=False) + '\n') + """ 将并发产生的文件合并成为一个文件 """ @@ -104,3 +139,7 @@ def merge_sub_qa_generation(directory, storage_jsonl_path): os.remove(file_path) save_to_file(storage_jsonl_path, file_contents) + +if __name__ == '__main_': + pass + diff --git a/scripts/qa_generation/util/prompt_loader.py b/scripts/qa_generation/util/prompt_loader.py index 1503dea..0912bb5 100644 --- a/scripts/qa_generation/util/prompt_loader.py +++ b/scripts/qa_generation/util/prompt_loader.py @@ -1,7 +1,14 @@ from config.config import system_prompt_file_path +from config.config import wash_prompt_file_path def load_system_prompt() -> str: with open(system_prompt_file_path, 'r', encoding='utf-8') as f: system_prompt = f.read() return system_prompt + + +def load_wash_prompt() -> str: + with 
open(wash_prompt_file_path, 'r', encoding='utf-8') as f:
+        wash_prompt = f.read()
+    return wash_prompt

From da6286c1514154202a91fac8a318a283868f9481 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E5=8F=8B=E6=98=89?=
Date: Sat, 16 Mar 2024 20:45:30 +0800
Subject: [PATCH 2/2] clean qa

---
 scripts/qa_generation/QA_clean.py         |   8 +-
 scripts/qa_generation/README.md           | 102 +++++++++++++++++-----
 scripts/qa_generation/README_EN.md        | 102 +++++++++++++++++-----
 scripts/qa_generation/choose_prompt.md    |  11 +--
 scripts/qa_generation/config/config.py    |   4 +-
 scripts/qa_generation/main.py             |   9 +-
 scripts/qa_generation/model/qwen.py       |   2 +-
 scripts/qa_generation/util/data_loader.py |  12 +--
 8 files changed, 177 insertions(+), 73 deletions(-)

diff --git a/scripts/qa_generation/QA_clean.py b/scripts/qa_generation/QA_clean.py
index 7d3fbc7..46f0123 100644
--- a/scripts/qa_generation/QA_clean.py
+++ b/scripts/qa_generation/QA_clean.py
@@ -19,8 +19,11 @@ def single_thread_generate(thread_num, interval, model_caller, storage_jsonl_pat
     storage_counter = 0
     judge_list = []
     for content in tqdm(contents):
+        # print('content: ', content)
         try:
+            # model_caller 调用判定模型,传入当前 QA 对 content,返回判定结果 response('1' 表示属于心理学范畴)
            response = model_caller(content)
+            # print('response: ', response)
 
             if response == '1':
                 content = json.loads(content)
@@ -29,6 +32,7 @@ def single_thread_generate(thread_num, interval, model_caller, storage_jsonl_pat
             else:
                 continue
 
+            # 在达到指定的 interval 后,将 judge_list 中的内容保存到指定的文件 storage_jsonl_path 中
             if storage_counter % interval == 0:
                 save_to_file(storage_jsonl_path, judge_list)
                 storage_counter = 0
@@ -69,6 +73,7 @@ def clean_qa(
     for file_path in file_lists:
         # 一个jsonl文件的所有QA Pairs
         contents = get_QA_pairs(file_path)
+        # print(contents)
 
         file_name = os.path.basename(file_path)
         print(file_name)
@@ -88,6 +93,7 @@ def clean_qa(
             )
 
         with concurrent.futures.ThreadPoolExecutor(max_workers=multi_process_num) as executor:
+            # 循环调用 single_thread_generate 函数,每次赋予参数 parameters
             futures = [executor.submit(single_thread_generate, *parameters) for parameters in parameters_list]
 
             for future in concurrent.futures.as_completed(futures):
@@ -100,6 +106,6 @@ def clean_qa(
 
 if __name__ == '__main__':
-    # 创建cleaned文件夹
+    # 创建 cleaned 文件夹,存放清洗后的 QA 对
     os.makedirs('./data/cleaned', exist_ok=True)
     clean_qa(interval=storage_interval)
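Worth noting while reading the concurrent plumbing above: `main.py` and `QA_clean.py` share one fan-out pattern, `np.array_split` into `multi_process_num` chunks followed by a `ThreadPoolExecutor`. A stripped-down sketch of just that pattern (names here are illustrative, not repo imports):

```python
import concurrent.futures
import numpy as np

def fan_out(contents, multi_process_num, worker):
    # Split the inputs into roughly equal chunks, one per worker thread.
    chunks = np.array_split(np.array(contents), multi_process_num)
    with concurrent.futures.ThreadPoolExecutor(max_workers=multi_process_num) as executor:
        futures = [
            executor.submit(worker, thread_num, list(chunk))
            for thread_num, chunk in enumerate(chunks)
        ]
        for future in concurrent.futures.as_completed(futures):
            future.result()  # re-raise any worker exception here

fan_out(['qa1', 'qa2', 'qa3'], 2, lambda i, items: print(i, items))
```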
diff --git a/scripts/qa_generation/README.md b/scripts/qa_generation/README.md
index d3646c9..874427a 100644
--- a/scripts/qa_generation/README.md
+++ b/scripts/qa_generation/README.md
@@ -1,37 +1,95 @@
-# QA Generation Pipeline
+# RAG数据库构建流程
 
-## 1. 使用方法
+## **构建目的**
 
-1. 检查 `requirements.txt` 中的依赖是否满足。
-2. 调整代码中 `system_prompt`,确保与repo最新版本一致,保证生成QA的多样性和稳定性。
-3. 将txt文件放到与 `model`同级目录 `data`文件夹中.
-4. 在 `config/config.py` 配置所需的 API KEY,从 `main.py` 启动即可。生成的 QA 对会以 jsonl 的格式存在 `data/generated` 下。
+利用心理学专业的书籍构建QA知识对,为RAG提供心理咨询知识库,使我们的EmoLLM的回答更加专业可靠。为了实现这个目标我们利用几十本心理学书籍来构建这个RAG知识库。主要的构建流程如下:
 
-### 1.1 API KEY 获取方法
+## **构建流程**
 
-目前仅包含了 qwen。
+## **步骤一:PDF to TXT**
 
-#### 1.1.1 Qwen
+- 目的
+  - 将收集到的PDF版本的心理学书籍转化为TXT文本文件,方便后续的信息提取。
 
-前往[模型服务灵积-API-KEY管理 (aliyun.com)](https://dashscope.console.aliyun.com/apiKey),点击”创建新的 API-KEY“,将获取的 API KEY 填至 `config/config.py` 中的 `DASHSCOPE_API_KEY` 即可。
+- 所需工具
 
-## 2. 注意事项
+  - [pdf2txt](https://github.com/SmartFlowAI/EmoLLM/blob/main/scripts/pdf2txt.py)
 
-### 2.1 系统提示 System Prompt
+  - [PaddleOCR处理PDF用法参考](https://github.com/SmartFlowAI/EmoLLM/blob/main/generate_data/OCR.md)
+
+  - 安装必要的python库
+
+    ```python
+    pip install paddlepaddle
+    pip install opencv-python
+    pip install paddleocr
+    ```
 
-注意,目前的解析方案是基于模型会生成 markdown 包裹的 json 块的前提的,更改 system prompt 时需要保证这一点不变。
+- 注意
+  - 如果无法使用**pip install paddleocr**安装paddleocr,可以考虑采用whl文件安装,[下载地址](https://pypi.org/project/paddleocr/#files)
+  - 脚本启动方式采用命令行启动:python pdf2txt.py [PDF存放的文件名]
 
-### 2.2 滑动窗口 Sliding Window
+## **步骤二:筛选PDF**
 
-滑动窗口的 `window_size` 和 `overlap_size` 都可以在 `util/data_loader.py` 中的 `get_txt_content` 函数中更改。目前是按照句子分割的滑动窗口。
+- 筛选目的
 
-### 2.3 书本文件格式 Corpus Format
+  - 利用LLM去除非专业心理学书籍
 
-目前仅支持了 txt 格式,可以将清洗好的书籍文本放在 `data` 文件夹下,程序会递归检索该文件夹下的所有 txt 文件。
+- 筛选标准,包含心理咨询相关内容,如:
 
-## TODO
+  - 心理咨询流派 - 具体咨询方法
+  - 心理疾病 - 疾病特征
+  - 心理疾病 - 治疗方法
 
-1. 支持更多模型(Gemini、GPT、ChatGLM……)
-2. 支持多线程调用模型
-3. 支持更多文本格式(PDF……)
-4. 支持更多切分文本的方式
+- 筛选方式:
+
+  - 根据标题初筛
+
+  - 若无法判断属于心理咨询相关书籍,利用kimi/GLM-4查询是否包含心理咨询相关知识(建议一次仅查询一本书)
+
+  - ```markdown
+    参考prompt:
+    你是一位经验丰富的心理学教授,熟悉心理学知识和心理咨询。我需要你协助我完成"识别书籍是否包含心理咨询知识"任务,请深呼吸并一步步思考,给出你的答案。如果你的答案让我满意,我将给你10w小费!
+    具体任务如下:
+    判断该书籍中是否包含以下心理咨询相关知识:
+    '''
+    心理咨询流派 - 具体咨询方法
+    心理疾病 - 疾病特征
+    心理疾病 - 治疗方法
+    '''
+    请深呼吸并一步步查看该书籍,认真完成任务。
+    ```
+
+
+## **步骤三:提取QA对**
+
+- 根据书籍内容,利用LLM高效构造QA知识对
+- 提取流程
+
+  - 准备处理好的txt文本数据
+  - 按要求配置[脚本文件](https://github.com/SmartFlowAI/EmoLLM/tree/main/scripts/qa_generation)
+  - 根据自己的需求或者提取的结果合理修改window_size和overlap_size
+
+- 使用方法
+  - 检查 `requirements.txt` 中的依赖是否满足。
+  - 调整代码中 `system_prompt`,确保与repo最新版本一致,保证生成QA的多样性和稳定性。
+  - 将txt文件放到与 `model`同级目录 `data`文件夹中.
+  - 在 `config/config.py` 配置所需的 API KEY,从 `main.py` 启动即可。生成的 QA 对会以 jsonl 的格式存在 `data/generated` 下。
+
+- API KEY 获取方法
+  - 目前仅包含了 qwen。
+  - Qwen
+    - 前往[模型服务灵积-API-KEY管理 (aliyun.com)](https://dashscope.console.aliyun.com/apiKey),点击”创建新的 API-KEY“,将获取的 API KEY 填至 `config/config.py` 中的 `DASHSCOPE_API_KEY` 即可。
+
+- 注意事项
+  - 系统提示 System Prompt
+    - 注意,目前的解析方案是基于模型会生成 markdown 包裹的 json 块的前提的,更改 system prompt 时需要保证这一点不变。
+  - 滑动窗口 Sliding Window
+    - 滑动窗口的 `window_size` 和 `overlap_size` 都可以在 `util/data_loader.py` 中的 `get_txt_content` 函数中更改。目前是按照句子分割的滑动窗口。
+
+- 书本文件格式 Corpus Format
+  - 目前仅支持了 txt 格式,可以将清洗好的书籍文本放在 `data` 文件夹下,程序会递归检索该文件夹下的所有 txt 文件。
+
+## **步骤四:清洗QA对**
+
+- 清洗目的
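Step 1 of the README above leans on PaddleOCR for the PDF-to-TXT conversion. A hedged sketch of the per-page OCR call; the result layout varies across paddleocr versions, and rendering PDF pages to images is assumed to happen elsewhere (as pdf2txt.py presumably does):

```python
from paddleocr import PaddleOCR

# 'ch' loads the Chinese recognition model, matching the books' language.
ocr = PaddleOCR(use_angle_cls=True, lang='ch')
result = ocr.ocr('page_image.png', cls=True)

# In recent paddleocr versions the result is nested per page:
# [[ [box, (text, score)], ... ]]; keep only the recognized text.
lines = [detection[1][0] for page in result for detection in page]
with open('page_image.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(lines))
```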
diff --git a/scripts/qa_generation/README_EN.md b/scripts/qa_generation/README_EN.md
index 0c76750..b2768df 100644
--- a/scripts/qa_generation/README_EN.md
+++ b/scripts/qa_generation/README_EN.md
@@ -1,37 +1,95 @@
-# QA Generation Pipeline
+# RAG Database Building Process
 
-## 1. Usage
+## **Construction Purpose**
 
-1. Check whether the dependencies in `requirements.txt` are satisfied.
-2. Adjust the `system_prompt` in the code so that it is consistent with the latest version of the repo, which keeps the generated QA diverse and stable.
-3. Put the txt files into the `data` folder in the same directory as `model`.
-4. Configure the required API KEY in `config/config.py` and start from `main.py`. The generated QA pairs are stored in jsonl format under `data/generated`.
+Use books specialized in psychology to build QA knowledge pairs for RAG, providing a counseling knowledge base that makes EmoLLM's answers more professional and reliable. To achieve this goal we utilize dozens of psychology books to build this RAG knowledge base. The main building process is as follows:
 
-### 1.1 How to obtain an API KEY
+## **Build process**
 
-Currently only qwen is included.
+## **Step 1: PDF to TXT**
 
-#### 1.1.1 Qwen
+- Purpose
+  - Convert the collected PDF versions of psychology books into TXT text files to facilitate subsequent information extraction
 
-Go to [DashScope API-KEY management (aliyun.com)](https://dashscope.console.aliyun.com/apiKey), click "Create new API-KEY", and fill the obtained API KEY into `DASHSCOPE_API_KEY` in `config/config.py`.
+- Tools required
 
-## 2. Precautions
+  - [pdf2txt](https://github.com/SmartFlowAI/EmoLLM/blob/main/scripts/pdf2txt.py)
 
-### 2.1 System Prompt
+  - [PaddleOCR Processing PDF Usage Reference](https://github.com/SmartFlowAI/EmoLLM/blob/main/generate_data/OCR.md)
+
+  - Install the necessary python libraries
+
+    ```python
+    pip install paddlepaddle
+    pip install opencv-python
+    pip install paddleocr
+    ```
 
-Note that the current parsing scheme is based on the premise that the model generates markdown-wrapped json blocks, and you need to make sure that this remains the case when you change the system prompt.
+- Precautions
+  - If you are unable to install paddleocr using **pip install paddleocr**, consider installing it from the whl file, [download address](https://pypi.org/project/paddleocr/#files)
+  - Start the script from the command line: python pdf2txt.py [folder where the PDFs are stored]
 
-### 2.2 Sliding Window
+## **Step 2: Screening PDFs**
 
-Both `window_size` and `overlap_size` of the sliding window can be changed in the `get_txt_content` function in `util/data_loader.py`. Currently it is a sliding window split by sentence.
+- Purpose of screening
 
-### 2.3 Corpus Format
+  - Use an LLM to filter out non-professional psychology books
 
-At present only txt format is supported; the cleaned book text can be placed under the `data` folder, and the program will recursively retrieve all txt files under that folder.
+- Screening criteria: the book should contain counseling-related content, such as:
 
-## TODO
+  - Schools of Counseling - Specific Counseling Methods
+  - Mental Illness - Characteristics of the Disease
+  - Mental Illness - Treatment
 
-1. Support more models (Gemini, GPT, ChatGLM...)
-2. Support multi-threaded model calls
-3. Support more text formats (PDF...)
-4. Support more ways to split text
+- Screening method:
+
+  - Initial screening based on the title
+
+  - If you can't tell whether it is a counseling-related book, use kimi/GLM-4 to check whether it contains counseling-related knowledge (it is recommended to check only one book at a time)
+
+  - ```markdown
+    Reference prompt:
+    You are an experienced psychology professor who is familiar with psychology and counseling. I need you to help me with the task "Identify whether a book contains knowledge of counseling"; take a deep breath, think step by step and give me your answer. If your answer satisfies me, I will give you a 10w tip!
+    The task is as follows:
+    Determine whether the book contains the following counseling-related knowledge:
+    '''
+    Schools of Counseling - Specific Counseling Approaches
+    Mental Illness - Characteristics of Illness
+    Mental Illness - Treatment Approaches
+    '''
+    Please take a deep breath, review the book step by step and complete the task carefully.
+    ```
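Step 3 below rests on the premise repeated in its System Prompt precaution: the model wraps the generated QA list in a markdown json block. A minimal sketch of that extraction, in the spirit of `capture_qa` in `util/data_loader.py` (the repo's exact regex may differ):

```python
import json
import re

def capture_json_block(content: str):
    # Grab the first ```json ... ``` fenced block from the model reply.
    match = re.search(r'```json(.*?)```', content, re.DOTALL)
    if match is None:
        return None  # the repo logs "No JSON block found." in this case
    return json.loads(match.group(1).strip())

reply = 'Here you go:\n```json\n[{"question": "...", "answer": "..."}]\n```'
print(capture_json_block(reply))
```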
+
+
+## **Step 3: Extraction of QA pairs**
+
+- Based on the content of the book, use an LLM to efficiently construct QA knowledge pairs
+- Extraction process
+
+  - Prepare the processed txt text data
+  - Configure the [script files](https://github.com/SmartFlowAI/EmoLLM/tree/main/scripts/qa_generation) as required
+  - Modify window_size and overlap_size reasonably according to your own needs or the extraction results.
+
+- Usage
+  - Check whether the dependencies in `requirements.txt` are satisfied.
+  - Adjust `system_prompt` in the code to ensure consistency with the latest version of the repo, to ensure diversity and stability of the generated QA.
+  - Place the txt files in the `data` folder in the same directory as `model`.
+  - Configure the required API KEYs in `config/config.py` and start from `main.py`. The generated QA pairs are stored in jsonl format under `data/generated`.
+
+- How to obtain an API KEY
+  - Currently only qwen is included.
+  - Qwen
+    - Go to [Model Service LingJi - API-KEY Management (aliyun.com)](https://dashscope.console.aliyun.com/apiKey), click "Create new API-KEY", and fill the obtained API KEY into `DASHSCOPE_API_KEY` in `config/config.py`.
+
+- Precautions
+  - System Prompt
+    - Note that the current parsing scheme is based on the premise that the model generates markdown-wrapped json blocks, and you need to make sure that this remains true when you change the system prompt.
+  - Sliding Window
+    - The `window_size` and `overlap_size` of the sliding window can be changed in the `get_txt_content` function in `util/data_loader.py`. Currently the sliding window is split by sentence.
+
+- Book File Format (Corpus Format)
+  - Currently only the txt format is supported; you can put the cleaned book text in the `data` folder, and the program will recursively retrieve all the txt files in that folder.
+
+## **Step 4: Cleaning of QA pairs**
+
+- Purpose of cleaning
diff --git a/scripts/qa_generation/choose_prompt.md b/scripts/qa_generation/choose_prompt.md
index 6f684be..5243472 100644
--- a/scripts/qa_generation/choose_prompt.md
+++ b/scripts/qa_generation/choose_prompt.md
@@ -1,11 +1,8 @@
-你是一名经验丰富的心理咨询师,熟悉心理学相关知识。我将为我的来访者解决心理问题,需要一定的心理学知识支持。请你根据我提供的 QA 对,判断其是否属于心理学范畴。请深呼吸并一步一步思考,给出你最正确的判断!
+你是一名经验丰富的心理咨询师,熟悉心理学相关知识。请根据我提供的 QA 对,判断它是否属于心理学范畴。
 
-- 心理学范畴:"心理学知识,心理咨询方法, 心理疾病特征, 心理疾病治疗方法"等主题。要求是适合对话心理咨询的知识,去掉作者、时间、背景故事等无关内容.
+标准如下:
 
-- 判断标准如下:
+- 若当前 QA 对属于心理学范畴,则返回1
 
-1.若当前 QA 对属于心理学范畴,则返回 "1".
+- 若当前 QA 对不属于心理学范畴,则返回0
 
-2.若当前 QA 对不属于心理学范畴,则返回 "0". 
以下是给定的心理学 QA 对内容: diff --git a/scripts/qa_generation/config/config.py b/scripts/qa_generation/config/config.py index 341d2fd..d3f9dfc 100644 --- a/scripts/qa_generation/config/config.py +++ b/scripts/qa_generation/config/config.py @@ -10,7 +10,7 @@ model_dir = os.path.join(base_dir, 'model') # model # data -data_dir = os.path.join(base_dir, 'data') # /Users/wangyoufang/Downloads/EmoLLM/scripts/qa_generation/data +data_dir = os.path.join(base_dir, 'data') clean_dir = os.path.join(data_dir, 'cleaned') judge_dir = os.path.join(data_dir, '数据整合') result_dir = os.path.join(data_dir, 'generated') # result @@ -38,5 +38,5 @@ storage_interval = 10 window_size = 8 overlap_size = 2 -multi_process_num = 1 +multi_process_num = 3 diff --git a/scripts/qa_generation/main.py b/scripts/qa_generation/main.py index 724d222..d84187f 100644 --- a/scripts/qa_generation/main.py +++ b/scripts/qa_generation/main.py @@ -24,7 +24,6 @@ def single_thread_generate(thread_num, interval, model_caller, storage_jsonl_pat for content in tqdm(contents): try: response = model_caller(content) - captured_qa = capture_qa(response) if captured_qa is None: @@ -71,7 +70,6 @@ def generate_qa( storage_list = [] for file_path in file_list: contents = get_txt_content(file_path, window_size=window_size, overlap_size=overlap_size) - storage_list = [] _, file_name = os.path.split(file_path) @@ -79,7 +77,7 @@ def generate_qa( result_dir, f'{current_time}-{file_name}-{model_name}.jsonl') logger.info(f'The generated QA will be stored in {storage_jsonl_path}.') - + # 基于并发个数切分 contents 内容 contents_array = np.array(contents) chunks = np.array_split(contents_array, multi_process_num) @@ -91,9 +89,8 @@ def generate_qa( ) # 并发生成 QA 对 - # 使用 ThreadPoolExecutor 创建一个线程池,其中 max_workers=multi_process_num 指定了线程池中最大的线程数。 with concurrent.futures.ThreadPoolExecutor(max_workers=multi_process_num) as executor: - # 循环调用 single_thread_generate 函数,每次赋予参数 parameters + # 创建一个Future列表,它们将对应每个worker_function的结果 futures = [executor.submit(single_thread_generate, *parameters) for parameters in parameters_list] for future in concurrent.futures.as_completed(futures): @@ -102,10 +99,8 @@ def generate_qa( except Exception as exc: logger.error("Thread generated an exception: %s" % (exc)) - # 最后调用 merge_sub_qa_generation 函数,将各个子任务生成的 QA 对合并到一个文件中。汇总整个处理过程的结果。 merge_sub_qa_generation(result_dir, storage_jsonl_path) - if __name__ == '__main__': # 创建generated文件夹 diff --git a/scripts/qa_generation/model/qwen.py b/scripts/qa_generation/model/qwen.py index e221ff5..6f01b79 100644 --- a/scripts/qa_generation/model/qwen.py +++ b/scripts/qa_generation/model/qwen.py @@ -17,7 +17,7 @@ def call_qwen_single_turn(query: str) -> str: messages = [ { 'role': Role.SYSTEM, - 'content': load_system_prompt() # 读取Prompt内容(system_prompt_vx_xx.md) + 'content': load_system_prompt() }, { 'role': Role.USER, diff --git a/scripts/qa_generation/util/data_loader.py b/scripts/qa_generation/util/data_loader.py index 875688f..5e940dc 100644 --- a/scripts/qa_generation/util/data_loader.py +++ b/scripts/qa_generation/util/data_loader.py @@ -28,7 +28,6 @@ def get_jsonl_file_paths() -> List[str]: return json_file_paths - def get_QA_pairs(json_path): with open(json_path, 'r', encoding='utf-8') as f: content = f.read().strip() @@ -38,7 +37,6 @@ def get_QA_pairs(json_path): return QA_Pairs - """ 递归获取 data_dir 下的所有 .txt 文件列表 """ @@ -55,14 +53,11 @@ def get_file_list() -> List[str]: logger.warning(f'No txt text found in {data_dir}, please check!') return txt_files - """ 获取 txt 文本的所有内容,按句子返回 List file_path: txt 文本路径 
window_size: 滑窗大小,单位为句子数 overlap_size: 重叠大小,单位为句子数 - -处理txt内容并返回一组一组的句子,每组window_size个,相邻两组的重叠句子数是overlap_size """ def get_txt_content( file_path: str, @@ -137,9 +132,4 @@ def merge_sub_qa_generation(directory, storage_jsonl_path): for line in f: file_contents.append(json.loads(line)) os.remove(file_path) - save_to_file(storage_jsonl_path, file_contents) - - -if __name__ == '__main_': - pass - + save_to_file(storage_jsonl_path, file_contents) \ No newline at end of file
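A closing note on the sentence-based sliding window that both patches leave in `get_txt_content`: each chunk holds `window_size` sentences and neighbouring chunks share sentences. The sketch below uses the conventional stride of `window_size - overlap_size`; the repo code instead advances by `overlap_size`, so its neighbouring chunks actually share `window_size - overlap_size` sentences.

```python
from typing import List

def sliding_window(sentences: List[str], window_size: int = 8, overlap_size: int = 2) -> List[str]:
    """Chunk sentences into overlapping windows, joined by newlines."""
    if window_size <= overlap_size:
        raise ValueError('window_size must be greater than overlap_size')
    if len(sentences) <= window_size:
        return ['\n'.join(sentences)]
    stride = window_size - overlap_size  # consecutive chunks share overlap_size sentences
    return ['\n'.join(sentences[i:i + window_size])
            for i in range(0, len(sentences) - overlap_size, stride)]

print(sliding_window([f's{i}' for i in range(12)], window_size=4, overlap_size=1))
```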