diff --git a/bibtex_to_table.py b/bibtex_to_table.py
index f51d632..e6c6eb2 100644
--- a/bibtex_to_table.py
+++ b/bibtex_to_table.py
@@ -224,20 +224,20 @@ def area_plot_analysis(df: pd.DataFrame) -> str:
     return format_json_data
 
 
-def preprocess_entry(entry: dict, taxonomy:dict[str, list[str]]) -> None:
+def preprocess_entry(entry: dict, taxonomy: dict[str, list[str]]) -> None:
     entry["ID"] = entry.get("ID", "")
 
     if not entry.get("title", ""):
-        raise ValueError("Title field is missing")
+        raise ValueError(f"Title field is missing for {entry}")
 
     if not entry.get("author", ""):
-        raise ValueError("Author field is missing")
+        raise ValueError(f"Author field is missing for {entry}")
 
     if not entry.get("year", ""):
-        raise ValueError("Year field is missing")
+        raise ValueError(f"Year field is missing for {entry}")
 
     if not entry.get("url", ""):
-        raise ValueError("URL field is missing")
+        raise ValueError(f"URL field is missing for {entry}")
 
     authors_list = entry["author"].split(" and ")
     authors_processed = []
@@ -261,7 +261,7 @@ def preprocess_entry(entry: dict, taxonomy:dict[str, list[str]]) -> None:
             # entry["month"] = month_name.capitalize()
             pass
         else:
-            raise ValueError(f"Month field is missing for paper {entry['title']}")
+            raise ValueError(f"Month field is missing for paper {entry}")
 
     # get the venue of the paper
     if "journal" in entry:
diff --git a/components/data/chartData.tsx b/components/data/chartData.tsx
index 96530b9..cd6765d 100644
--- a/components/data/chartData.tsx
+++ b/components/data/chartData.tsx
@@ -1,27 +1,27 @@
 export const bar_data = [
   {
     name: 'Text',
-    overall: 157,
+    overall: 158,
     collaboration: 21,
     competition: 9,
     mixed_objectives: 51,
-    text: 157,
+    text: 158,
     embodied: 7,
     virtual: 24,
     robotics: 5,
     two_agents: 55,
-    reinforcement_learning: 20,
+    reinforcement_learning: 21,
     agents_with_personas: 23,
     human: 88,
     not_applicable: 142,
-    rule_based: 51,
+    rule_based: 52,
     prompting_and_in_context_learning: 46,
     qualitative: 35,
-    human_agent: 54,
+    human_agent: 55,
     implicit_objectives: 18,
     more_than_three_agents: 29,
     finetuning: 35,
-    agents_with_memory: 15,
+    agents_with_memory: 16,
     more_omniscient: 3,
     pretraining: 18,
     model_based: 44,
@@ -71,7 +71,7 @@ export const bar_data = [
     prompting_and_in_context_learning: 16,
     more_than_three_agents: 10,
     rule_based: 48,
-    not_applicable: 35,
+    not_applicable: 39,
     finetuning: 17,
     qualitative: 10,
     human_agent: 21,
@@ -132,29 +132,29 @@ export const area_data = [
     human: 1,
     rule_based: 1,
     human_agent: 1,
-    agent_teams: 0,
-    prompting_and_in_context_learning: 0,
-    more_omniscient: 0,
-    fully_omniscient: 0,
-    education: 0,
-    more_than_three_agents: 0,
-    agents_with_personas: 0,
-    text: 0,
     more_information_asymmetrical: 0,
     finetuning: 0,
-    agents_with_memory: 0,
-    implicit_objectives: 0,
-    health: 0,
-    simulated_humans: 0,
-    virtual: 0,
-    competition: 0,
+    model_based: 0,
     pretraining: 0,
+    more_than_three_agents: 0,
+    more_omniscient: 0,
+    text: 0,
     two_agents: 0,
-    policy: 0,
-    embodied: 0,
+    education: 0,
+    simulated_humans: 0,
     not_applicable: 0,
+    health: 0,
+    agent_teams: 0,
+    implicit_objectives: 0,
+    virtual: 0,
+    embodied: 0,
+    policy: 0,
+    prompting_and_in_context_learning: 0,
     qualitative: 0,
-    model_based: 0,
+    agents_with_personas: 0,
+    fully_omniscient: 0,
+    competition: 0,
+    agents_with_memory: 0,
   },
   {
     name: '2016',
@@ -176,51 +176,51 @@ export const area_data = [
     more_than_three_agents: 1,
     model_based: 1,
     education: 1,
+    more_information_asymmetrical: 0,
+    pretraining: 0,
+    more_omniscient: 0,
+    simulated_humans: 0,
+    not_applicable: 0,
+    health: 0,
     agent_teams: 0,
+    implicit_objectives: 0,
+    policy: 0,
     prompting_and_in_context_learning: 0,
-    more_omniscient: 0,
     fully_omniscient: 0,
-    more_information_asymmetrical: 0,
     agents_with_memory: 0,
-    implicit_objectives: 0,
-    health: 0,
-    simulated_humans: 0,
-    pretraining: 0,
-    policy: 0,
-    not_applicable: 0,
   },
   {
     name: '2017',
-    text: 1,
+    text: 2,
     mixed_objectives: 1,
-    reinforcement_learning: 3,
+    reinforcement_learning: 4,
     two_agents: 2,
-    agents_with_memory: 1,
-    rule_based: 4,
-    human_agent: 2,
+    agents_with_memory: 2,
+    rule_based: 5,
+    human_agent: 3,
     virtual: 3,
     not_applicable: 4,
     competition: 1,
     robotics: 1,
     qualitative: 1,
     human: 1,
-    collaboration: 0,
-    agent_teams: 0,
-    prompting_and_in_context_learning: 0,
-    more_omniscient: 0,
-    fully_omniscient: 0,
-    education: 0,
-    more_than_three_agents: 0,
-    agents_with_personas: 0,
     more_information_asymmetrical: 0,
     finetuning: 0,
-    implicit_objectives: 0,
-    health: 0,
-    simulated_humans: 0,
+    model_based: 0,
     pretraining: 0,
-    policy: 0,
+    more_than_three_agents: 0,
+    more_omniscient: 0,
+    education: 0,
+    simulated_humans: 0,
+    health: 0,
+    agent_teams: 0,
+    implicit_objectives: 0,
+    collaboration: 0,
     embodied: 0,
-    model_based: 0,
+    policy: 0,
+    prompting_and_in_context_learning: 0,
+    agents_with_personas: 0,
+    fully_omniscient: 0,
   },
   {
     name: '2018',
@@ -250,9 +250,9 @@ export const area_data = [
     model_based: 1,
     education: 1,
     more_omniscient: 1,
-    agent_teams: 0,
     more_information_asymmetrical: 0,
     pretraining: 0,
+    agent_teams: 0,
     policy: 0,
   },
   {
@@ -279,14 +279,14 @@ export const area_data = [
     agent_teams: 1,
     model_based: 1,
     health: 1,
+    more_information_asymmetrical: 0,
+    pretraining: 0,
     more_omniscient: 0,
-    fully_omniscient: 0,
     education: 0,
-    more_information_asymmetrical: 0,
-    agents_with_memory: 0,
     simulated_humans: 0,
-    pretraining: 0,
     policy: 0,
+    fully_omniscient: 0,
+    agents_with_memory: 0,
   },
   {
     name: '2020',
@@ -317,8 +317,8 @@ export const area_data = [
     policy: 1,
     education: 1,
     more_omniscient: 1,
-    agent_teams: 0,
     more_information_asymmetrical: 0,
+    agent_teams: 0,
     agents_with_memory: 0,
   },
   {
@@ -349,10 +349,10 @@ export const area_data = [
     more_omniscient: 1,
     agents_with_personas: 1,
     prompting_and_in_context_learning: 1,
-    fully_omniscient: 0,
     more_information_asymmetrical: 0,
-    agents_with_memory: 0,
     pretraining: 0,
+    fully_omniscient: 0,
+    agents_with_memory: 0,
   },
   {
     name: '2022',
@@ -384,8 +384,8 @@ export const area_data = [
     education: 6,
     policy: 2,
     more_omniscient: 2,
-    fully_omniscient: 0,
     more_information_asymmetrical: 0,
+    fully_omniscient: 0,
   },
   {
     name: '2023',
diff --git a/components/papers.tsx b/components/papers.tsx
index 35c8db8..c239d32 100644
--- a/components/papers.tsx
+++ b/components/papers.tsx
@@ -30,6 +30,18 @@ export const data: Paper[] = [
 },
 
 
+{ title: "Persuasive technology: using computers to change what we think and do",
+  date: "12/2002",
+  environments: "n/a",
+  agents: "n/a",
+  evaluation: "n/a",
+  other: "n/a",
+  url: "https://dl.acm.org/doi/10.1145/764008.763957",
+  bibtex: "@article{fogg2002persuasive,\n title={Persuasive technology: using computers to change what we think and do},\n author={Fogg, Brian J},\n journal={Ubiquity},\n volume={2002},\n number={December},\n pages={2},\n year={2002},\n month={12},\n publisher={ACM New York, NY, USA},\n url={https://dl.acm.org/doi/10.1145/764008.763957},\n environments = {n/a},\n agents = {n/a},\n evaluation = {n/a},\n other = {n/a},\n}",
+  subsection: "",
+},
+
+
 { title: "Advancing Social Intelligence in AI Agents: Technical Challenges and Open Questions",
   date: "04/2024",
   environments: "collaboration, competition, mixed_objectives, text, embodied, virtual, robotics",
@@ -438,6 +450,18 @@ export const data: Paper[] = [
 },
 
 
+{ title: "AI wolf contest\u2014development of game AI using collective intelligence\u2014",
+  date: "04/2017",
+  environments: "text",
+  agents: "reinforcement_learning, agents_with_memory",
+  evaluation: "rule_based",
+  other: "human_agent",
+  url: "https://link.springer.com/chapter/10.1007/978-3-319-57969-6_8",
+  bibtex: "@inproceedings{toriumi2017ai,\n title={AI wolf contest\u2014development of game AI using collective intelligence\u2014},\n author={Toriumi, Fujio and Osawa, Hirotaka and Inaba, Michimasa and Katagami, Daisuke and Shinoda, Kosuke and Matsubara, Hitoshi},\n booktitle={Computer Games: 5th Workshop on Computer Games, CGW 2016, and 5th Workshop on General Intelligence in Game-Playing Agents, GIGA 2016, Held in Conjunction with the 25th International Conference on Artificial Intelligence, IJCAI 2016, New York, USA, July 9-10, 2016, Revised Selected Papers 5},\n pages={101--115},\n year={2017},\n month={4},\n organization={Springer},\n url={https://link.springer.com/chapter/10.1007/978-3-319-57969-6_8},\n agents = {reinforcement_learning, agents_with_memory},\n environments = {text},\n evaluation = {rule_based},\n other = {human_agent},\n}",
+  subsection: "environments/language",
+},
+
+
 { title: "SEAN: Social Environment for Autonomous Navigation",
   date: "09/2020",
   environments: "mixed_objectives, embodied",
diff --git a/docs/helper.md b/docs/helper.md
index 5486ae3..971e599 100644
--- a/docs/helper.md
+++ b/docs/helper.md
@@ -6,6 +6,8 @@
 
 [07, 2023] [Communicative Agents for Software Development](https://arxiv.org/abs/2307.07924), Chen Qian et al., arXiv
 
+[12, 2002] [Persuasive technology: using computers to change what we think and do](https://dl.acm.org/doi/10.1145/764008.763957), Brian J Fogg et al., Ubiquity
+
 ### environments/language
 
 [3, 2024] [Chatbot Arena: An Open Platform for Evaluating LLMs by Human Preference](https://arxiv.org/abs/2403.04132), Wei-Lin Chiang et al., arXiv preprint arXiv:2403.04132
@@ -62,6 +64,8 @@
 
 [9, 2017] [Deal or No Deal? End-to-End Learning of Negotiation Dialogues](https://aclanthology.org/D17-1259), Lewis et al., Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing
 
+[4, 2017] [AI wolf contest—development of game AI using collective intelligence—](https://link.springer.com/chapter/10.1007/978-3-319-57969-6_8), Fujio Toriumi et al., Computer Games: 5th Workshop on Computer Games, CGW 2016, and 5th Workshop on General Intelligence in Game-Playing Agents, GIGA 2016, Held in Conjunction with the 25th International Conference on Artificial Intelligence, IJCAI 2016, New York, USA, July 9-10, 2016, Revised Selected Papers 5
+
 [8, 2016] [A persona-based neural conversation model](https://aclanthology.org/P16-1094/), Jiwei Li et al., arXiv preprint arXiv:1603.06155
 
 [11, 2009] [The anatomy of ALICE](https://link.springer.com/chapter/10.1007/978-1-4020-6710-5_13), Richard S Wallace et al., n/a
diff --git a/docs/paper_table.md b/docs/paper_table.md
index 34cad73..d6495db 100644
--- a/docs/paper_table.md
+++ b/docs/paper_table.md
@@ -4,6 +4,7 @@
 | [Social Skill Training with Large Language Models](https://arxiv.org/abs/2404.04204) | 4, 2024 | ['collaboration', 'competition', 'mixed_objectives', 'text'] | ['two_agents', 'reinforcement_learning', 'agents_with_personas'] | ['human', 'rule_based'] | ['n/a'] |
 | [Social Intelligence Data Infrastructure: Structuring the Present and Navigating the Future](https://arxiv.org/abs/2403.14659) | 2, 2024 | ['text'] | ['n/a'] | ['human'] | ['n/a'] |
 | [Communicative Agents for Software Development](https://arxiv.org/abs/2307.07924) | 07, 2023 | ['collaboration', 'embodied'] | ['prompting_and_in_context_learning', 'more_than_three_agents'] | ['rule_based'] | ['n/a'] |
+| [Persuasive technology: using computers to change what we think and do](https://dl.acm.org/doi/10.1145/764008.763957) | 12, 2002 | ['n/a'] | ['n/a'] | ['n/a'] | ['n/a'] |
 | [Chatbot Arena: An Open Platform for Evaluating LLMs by Human Preference](https://arxiv.org/abs/2403.04132) | 3, 2024 | ['text', 'mixed_objectives'] | ['prompting_and_in_context_learning'] | ['qualitative', 'human'] | ['human_agent'] |
 | [{CALYPSO}: {LLMs} as Dungeon Masters' Assistants](https://arxiv.org/abs/2308.07540) | 8, 2023 | ['text', 'implicit_objectives'] | ['more_than_three_agents', 'finetuning'] | ['human'] | ['human_agent'] |
 | [{I} Cast Detect Thoughts: Learning to Converse and Guide with Intents and Theory-of-Mind in Dungeons and Dragons](https://aclanthology.org/2023.acl-long.624) | 7, 2023 | ['text', 'implicit_objectives'] | ['more_than_three_agents', 'reinforcement_learning'] | ['human', 'rule_based'] | ['human_agent'] |
@@ -31,6 +32,7 @@
 | [A knowledge-grounded neural conversation model](https://ojs.aaai.org/index.php/AAAI/article/view/11977) | 4, 2018 | ['text', 'mixed_objectives', 'implicit_objectives'] | ['finetuning'] | ['qualitative', 'human'] | ['human_agent'] |
 | [Towards empathetic human-robot interactions](https://link.springer.com/chapter/10.1007/978-3-319-75487-1_14) | 3, 2018 | ['text', 'mixed_objectives'] | ['agents_with_personas'] | ['qualitative', 'human'] | ['human_agent'] |
 | [Deal or No Deal? End-to-End Learning of Negotiation Dialogues](https://aclanthology.org/D17-1259) | 9, 2017 | ['text', 'mixed_objectives'] | ['reinforcement_learning', 'two_agents', 'agents_with_memory'] | ['rule_based'] | ['human_agent'] |
+| [AI wolf contest—development of game AI using collective intelligence—](https://link.springer.com/chapter/10.1007/978-3-319-57969-6_8) | 4, 2017 | ['text'] | ['reinforcement_learning', 'agents_with_memory'] | ['rule_based'] | ['human_agent'] |
 | [A persona-based neural conversation model](https://aclanthology.org/P16-1094/) | 8, 2016 | ['text', 'mixed_objectives'] | ['finetuning', 'agents_with_personas'] | ['qualitative', 'human'] | ['human_agent'] |
 | [The anatomy of ALICE](https://link.springer.com/chapter/10.1007/978-1-4020-6710-5_13) | 11, 2009 | ['text', 'mixed_objectives'] | ['agents_with_personas'] | ['human'] | ['n/a'] |
 | [Empathic computing](https://link.springer.com/chapter/10.1007/11825890_3) | 1, 2006 | ['text', 'mixed_objectives'] | ['agents_with_personas'] | ['human'] | ['n/a'] |
@@ -388,9 +390,9 @@
 | [Ethical challenges in data-driven dialogue systems](https://dl.acm.org/doi/10.1145/3278721.3278723) | 12, 2018 | ['text'] | ['n/a'] | ['n/a'] | ['n/a'] |
 
 ### Basic Stats
-Total number of papers: 384
+Total number of papers: 386
 #### Subsections
-environments/language: 31
+environments/language: 32
 environments/embodied: 7
 environments/virtual: 32
 environments/robotics: 30
diff --git a/main.bib b/main.bib
index 638aa90..4606c79 100644
--- a/main.bib
+++ b/main.bib
@@ -15,6 +15,23 @@ @misc{qian2023communicative
 }
 ## Papers
 ### Surveys and Overview
+@article{fogg2002persuasive,
+  title={Persuasive technology: using computers to change what we think and do},
+  author={Fogg, Brian J},
+  journal={Ubiquity},
+  volume={2002},
+  number={December},
+  pages={2},
+  year={2002},
+  month={12},
+  publisher={ACM New York, NY, USA},
+  url={https://dl.acm.org/doi/10.1145/764008.763957},
+  environments = {n/a},
+  agents = {n/a},
+  evaluation = {n/a},
+  other = {n/a},
+}
+
 @article{mathur2024advancing,
   title = {Advancing Social Intelligence in AI Agents: Technical Challenges and Open Questions},
   author = {Mathur, Leena and Liang, Paul Pu and Morency, Louis-Philippe},
@@ -604,6 +621,21 @@ @ARTICLE{Shinn2023-tt
   other = {more_omniscient},
 }
 
+@inproceedings{toriumi2017ai,
+  title={AI wolf contest—development of game AI using collective intelligence—},
+  author={Toriumi, Fujio and Osawa, Hirotaka and Inaba, Michimasa and Katagami, Daisuke and Shinoda, Kosuke and Matsubara, Hitoshi},
+  booktitle={Computer Games: 5th Workshop on Computer Games, CGW 2016, and 5th Workshop on General Intelligence in Game-Playing Agents, GIGA 2016, Held in Conjunction with the 25th International Conference on Artificial Intelligence, IJCAI 2016, New York, USA, July 9-10, 2016, Revised Selected Papers 5},
+  pages={101--115},
+  year={2017},
+  month={4},
+  organization={Springer},
+  url={https://link.springer.com/chapter/10.1007/978-3-319-57969-6_8},
+  agents = {reinforcement_learning, agents_with_memory},
+  environments = {text},
+  evaluation = {rule_based},
+  other = {human_agent},
+}
+
 #### Embodied Environments
 @article{environments/embodied,