{
  "data_type": "AVID",
  "data_version": "0.2",
  "metadata": {
    "vuln_id": "AVID-2023-V027"
  },
  "affects": {
    "developer": [
      "OpenAI"
    ],
    "deployer": [
      "OpenAI"
    ],
    "artifacts": [
      {
        "type": "System",
        "name": "ChatGPT"
      }
    ]
  },
  "problemtype": {
    "classof": "LLM Evaluation",
    "type": "Issue",
    "description": {
      "lang": "eng",
      "value": "ChatGPT generates false or incomplete references to scientific literature"
    }
  },
  "references": [
    {
      "type": "screenshot",
      "label": "Screenshot of example answer",
      "url": "../img/R00031.png"
    }
  ],
  "description": {
    "lang": "eng",
"value": "When asked to recommend papers on explainability, privacy, adversarial ML, etc. ChatGPT recommends papers that (a) may not always exist, (b) mixes up correct and incorrect information, e.g. correct title but wrong authors, or (c) have incomplete information on authors."
  },
  "reports": [
    {
      "report_id": "AVID-2023-R0003",
      "type": "Issue",
      "name": "ChatGPT links wrong authors to papers"
    }
  ],
  "impact": {
    "avid": {
      "risk_domain": [
        "Ethics"
      ],
      "sep_view": [
        "E0402: Generative Misinformation"
      ],
      "lifecycle_view": [
        "L05: Evaluation",
        "L06: Deployment"
      ],
      "taxonomy_version": "0.2"
    }
  },
  "credit": [
    {
      "lang": "eng",
      "value": "Jaydeep Borkar, N/A"
    }
  ],
  "published_date": "2023-03-31",
  "last_modified_date": "2023-03-31"
}