Feature/reward shaping mapper #180
Merged: pan-x-c merged 6 commits into agentscope-ai:feature/data_processor from HYLcool:feature/reward_shaping_mapper on Aug 12, 2025.
Commits (6, all by HYLcool):
- 789c5bf: + add reward_shaping_mapper and its test cases
- dc72b7a: + add reward_shaping_mapper and its test cases
- 9042f78: + add reward_shaping_mapper and its test cases
- 88ef70e: + add reward_shaping_mapper and its test cases
- 681b021: * use google-style comments
- 2aac3a9: * fix tests
Files changed (3).

File 1: the new test file (74 added lines):

```python
import unittest
from copy import deepcopy
from typing import List

import torch

from trinity.buffer.pipelines.experience_pipeline import ExperienceOperator
from trinity.common.config import OperatorConfig
from trinity.common.experience import EID, Experience


def get_experiences(task_num: int, repeat_times: int = 1, step_num: int = 1) -> List[Experience]:
    """Generate a list of experiences for testing."""
    return [
        Experience(
            eid=EID(task=i, run=j, step=k),
            tokens=torch.zeros((5,)),
            prompt_length=4,
            reward=j,
            logprobs=torch.tensor([0.1]),
            info={
                "llm_quality_score": i,
                "llm_difficulty_score": k,
            },
        )
        for i in range(task_num)
        for j in range(repeat_times)
        for k in range(step_num)
    ]


class TestRewardShapingMapper(unittest.TestCase):
    def test_basic_usage(self):
        # test input cache
        op_configs = [
            OperatorConfig(
                name="reward_shaping_mapper",
                args={
                    "reward_shaping_configs": [
                        {
                            "stats_key": "llm_quality_score",
                            "op_type": "ADD",
                            "weight": 1.0,
                        },
                        {
                            "stats_key": "llm_difficulty_score",
                            "op_type": "MUL",
                            "weight": 0.5,
                        },
                    ]
                },
            )
        ]
        ops = ExperienceOperator.create_operators(op_configs)
        self.assertEqual(len(ops), 1)

        op = ops[0]
        task_num = 8
        repeat_times = 4
        step_num = 2
        experiences = get_experiences(
            task_num=task_num, repeat_times=repeat_times, step_num=step_num
        )
        res_exps, metrics = op.process(deepcopy(experiences))
        self.assertEqual(len(res_exps), task_num * repeat_times * step_num)
        self.assertEqual(len(metrics), 0)

        for prev_exp, res_exp in zip(experiences, res_exps):
            self.assertAlmostEqual(
                (prev_exp.reward + prev_exp.info["llm_quality_score"])
                * 0.5
                * prev_exp.info["llm_difficulty_score"],
                res_exp.reward,
            )
```
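To make the assertion at the end of the test concrete, here is the shaping arithmetic for one generated experience under the two configured steps (ADD with weight 1.0, then MUL with weight 0.5). This is a plain-Python restatement of what the test expects, not code from the PR:

```python
# Take the generated experience with task i=3, run j=2, step k=1.
reward = 2.0             # initial reward is j
quality_score = 3.0      # info["llm_quality_score"] is i
difficulty_score = 1.0   # info["llm_difficulty_score"] is k

# Step 1: op_type "ADD" with weight 1.0 -> reward + 1.0 * stats
reward = reward + 1.0 * quality_score        # 5.0

# Step 2: op_type "MUL" with weight 0.5 -> reward * (0.5 * stats)
reward = reward * (0.5 * difficulty_score)   # 2.5

# Matches the test's expected value: (j + i) * 0.5 * k
assert reward == (2.0 + 3.0) * 0.5 * 1.0
```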
File 2: a new empty file.

File 3: the RewardShapingMapper implementation (91 added lines):
```python
from typing import Dict, List, Optional, Tuple

from trinity.buffer.operators import EXPERIENCE_OPERATORS, ExperienceOperator
from trinity.common.constants import OpType
from trinity.common.experience import Experience


@EXPERIENCE_OPERATORS.register_module("reward_shaping_mapper")
class RewardShapingMapper(ExperienceOperator):
    """Reshapes the existing rewards of experiences based on rules or other advanced methods.

    Note:
        This mapper assumes that the reward is already calculated and stored in the
        Experience object, and that the necessary stats are already calculated and
        stored in the Experience info field.
    """

    def __init__(self, reward_shaping_configs: Optional[List[Dict]] = None):
        """Initializes the RewardShapingMapper.

        Args:
            reward_shaping_configs (list[dict], optional): A list of dictionaries
                containing reward shaping configurations. Each dictionary should
                include the following keys:

                - stats_key (str): The field key name of the target stats used to
                  shape the reward.
                - op_type (str): The type of operator to apply between the reward
                  and the target stats. Should be one of {"ADD", "SUB", "MUL", "DIV"}.
                - weight (float): The weight for the target stats.

        Example:
            [
                {
                    "stats_key": "llm_quality_score",
                    "op_type": "ADD",
                    "weight": 1.0,
                }
            ]
        """
        if reward_shaping_configs is None:
            reward_shaping_configs = []
        self.reward_shaping_configs = reward_shaping_configs

    def process(self, exps: List[Experience]) -> Tuple[List[Experience], Dict]:
        res_exps = []
        for exp in exps:
            # skip experiences that don't have a reward
            if exp.reward is None:
                continue
            res_exp = exp
            for reward_shaping_config in self.reward_shaping_configs:
                res_exp = self._reward_shaping_single(res_exp, reward_shaping_config)
            res_exps.append(res_exp)
        return res_exps, {}

    def _reward_shaping_single(self, exp: Experience, reward_shaping_config: Dict):
        """Reshapes the existing reward of one experience based on the given config.

        Args:
            exp (Experience): The experience object whose reward is to be reshaped.
            reward_shaping_config (dict): A dictionary containing the reward shaping
                configuration. It should include the following keys:

                - stats_key (str): The field key name of the target stats used to
                  shape the reward.
                - op_type (str): The type of operator to apply between the reward
                  and the target stats. Should be one of {"ADD", "SUB", "MUL", "DIV"}.
                - weight (float): The weight for the target stats.

        Returns:
            Experience: The experience object with the reshaped reward.
        """
        tgt_stats = reward_shaping_config.get("stats_key", None)
        op_type = OpType[reward_shaping_config.get("op_type", "ADD")]
        weight = reward_shaping_config.get("weight", 1.0)
        # if the target stats is not specified, skip it and return the original experience
        if tgt_stats is None:
            return exp
        exp_info = exp.info
        if exp_info is None or len(exp_info) == 0:
            return exp
        # if the target stats does not exist in the exp info, skip it and return
        # the original experience
        if tgt_stats not in exp_info:
            return exp
        if op_type == OpType.ADD:
            exp.reward += weight * exp_info[tgt_stats]
        elif op_type == OpType.MUL:
            exp.reward *= weight * exp_info[tgt_stats]
        elif op_type == OpType.SUB:
            exp.reward -= weight * exp_info[tgt_stats]
        elif op_type == OpType.DIV:
            divisor = weight * exp_info[tgt_stats]
            if divisor != 0:
                exp.reward /= divisor
        return exp
```
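For reference, here is a minimal standalone usage sketch of the operator. The Experience and EID fields are taken from the test above; the import path for RewardShapingMapper is hypothetical, since the diff view does not show the file's location:

```python
import torch

from trinity.common.experience import EID, Experience
# Hypothetical import path; adjust to the module where RewardShapingMapper is defined.
from trinity.buffer.operators.reward_shaping_mapper import RewardShapingMapper

# Shape rewards with a single additive term: reward += 1.0 * llm_quality_score.
mapper = RewardShapingMapper(
    reward_shaping_configs=[
        {"stats_key": "llm_quality_score", "op_type": "ADD", "weight": 1.0},
    ]
)
exp = Experience(
    eid=EID(task=0, run=0, step=0),
    tokens=torch.zeros((5,)),
    prompt_length=4,
    reward=1.0,
    logprobs=torch.tensor([0.1]),
    info={"llm_quality_score": 2.0},
)
shaped, metrics = mapper.process([exp])
assert shaped[0].reward == 3.0  # 1.0 + 1.0 * 2.0
assert metrics == {}            # process() always returns an empty metrics dict
```

Note that process() mutates the input experiences in place (res_exp = exp binds the same object), which is why the test deep-copies its inputs before calling it.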