#!/usr/bin/env python
# coding: utf-8
# # Open Access status for WoS DOIs
#
# Notebook used for the study presented at the EAHIL 2022 conference
#
# * Title: Computational assistance in the analysis of cited references in biomedical literature: a case study from two institutions.
#
# * Authors:
# * Teresa Lee, Knowledge Manager, International Agency for Research on Cancer (IARC/WHO) leet@iarc.fr
# * Pablo Iriarte, IT Coordinator, Library of the University of Geneva Pablo.Iriarte@unige.ch
# * Floriane Muller, Librarian (Medical Library), Library of the University of Geneva Floriane.Muller@unige.ch
# * Ramon Cierco Jimenez, Doctoral Student, International Agency for Research on Cancer (IARC/WHO) CiercoR@students.iarc.fr
#
#
# ### Prerequisites
# * DOIs from publications and citations
#
# ## API unpaywall
#
# https://unpaywall.org/products/api
#
# ### Schema
#
# https://unpaywall.org/data-format
#
# ### Authentication
#
# Requests must include your email as a parameter at the end of the URL, like this: api.unpaywall.org/my/request?email=YOUR_EMAIL.
#
# ### Rate limits
#
# Please limit use to 100,000 calls per day
#
# ### Example
#
# https://api.unpaywall.org/v2/10.1038/nature12373?email=YOUR_EMAIL
#
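#
# A quick illustration of the call above, kept as a commented sketch: the email
# placeholder and the 'is_oa' / 'oa_status' fields read from the response follow the
# Unpaywall schema linked above.
#
# import requests
# resp = requests.get('https://api.unpaywall.org/v2/10.1038/nature12373',
#                     params={'email': 'YOUR_EMAIL'}, timeout=30)
# if resp.status_code == 200:
#     record = resp.json()
#     print(record.get('is_oa'), record.get('oa_status'))
#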
# In[1]:
import re
import os
import pandas as pd
import time
import datetime
import requests
# ## Extract DOIs for UNIGE
#
# In[2]:
# open UNIGE data
unige_citations = pd.read_csv('WoS UNIGE results 2001_2020/UNIGE_WoS.csv', encoding='utf-8', header=0, usecols=['Accession Number', 'DOI', 'DOI of cited article'])
unige_citations
# In[3]:
# test values
unige_citations.loc[unige_citations['DOI of cited article'].isna()]
# In[5]:
# unige publications
unige_publications = unige_citations[['Accession Number', 'DOI']].drop_duplicates(subset='Accession Number')
unige_publications
# In[6]:
# test values
unige_publications.loc[unige_publications['DOI'].isna()]
# In[8]:
# open IARC data
iarc_citations = pd.read_csv('WoS IARC results 2001_2020/IARC_WoS.csv', encoding='utf-8', header=0, usecols=['Accession Number', 'DOI', 'DOI of cited article'])
iarc_citations
# In[9]:
# test values
iarc_citations.loc[iarc_citations['DOI of cited article'].isna()]
# In[10]:
# iarc publications
iarc_publications = iarc_citations[['Accession Number', 'DOI']].drop_duplicates(subset='Accession Number')
iarc_publications
# In[11]:
# test values
iarc_publications.loc[iarc_publications['DOI'].isna()]
# In[12]:
# % of UNIGE publications with DOIs
1 - (unige_publications.loc[unige_publications['DOI'].isna()].shape[0] / unige_publications.shape[0])
# In[13]:
# % of UNIGE citations with DOIs
1 - (unige_citations.loc[unige_citations['DOI of cited article'].isna()].shape[0] / unige_citations.shape[0])
# In[15]:
# % of IARC publications with DOIs
1 - (iarc_publications.loc[iarc_publications['DOI'].isna()].shape[0] / iarc_publications.shape[0])
# In[14]:
# % of IARC citations with DOIs
1 - (iarc_citations.loc[iarc_citations['DOI of cited article'].isna()].shape[0] / iarc_citations.shape[0])
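# In[ ]:
# The same coverage figure can be expressed once as a small helper (hypothetical name
# doi_share, not used elsewhere in this notebook); shown here only as a compact sketch
# of the four cells above.
def doi_share(df, column):
    """Share of rows where `column` holds a DOI (i.e. is not missing)."""
    return 1 - (df[column].isna().sum() / df.shape[0])

# e.g. doi_share(unige_publications, 'DOI') or doi_share(iarc_citations, 'DOI of cited article')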
# In[16]:
# add dois
dois = pd.concat([unige_publications, iarc_publications])
dois
# In[17]:
# rename column to add dois of citations
unige_citations = unige_citations.rename(columns = {'DOI' : 'DOI_publication', 'DOI of cited article': 'DOI'})
unige_citations
# In[18]:
# rename column to add dois of citations
iarc_citations = iarc_citations.rename(columns = {'DOI' : 'DOI_publication', 'DOI of cited article': 'DOI'})
iarc_citations
# In[19]:
# remove nas
unige_citations_dois = unige_citations[['Accession Number', 'DOI']].loc[unige_citations['DOI'].notna()]
unige_citations_dois
# In[20]:
# remove nas
iarc_citations_dois = iarc_citations[['Accession Number', 'DOI']].loc[iarc_citations['DOI'].notna()]
iarc_citations_dois
# In[21]:
# add dois
dois = pd.concat([dois, unige_citations_dois, iarc_citations_dois])
dois
# In[22]:
# remove rows without DOI (dedup comes next)
dois = dois.loc[dois['DOI'].notna()]
dois
# In[23]:
# trim
dois['DOI'] = dois['DOI'].str.strip()
dois
# In[24]:
# dedup
dois_dedup = dois.drop_duplicates(subset='DOI')
dois_dedup
# In[25]:
# reset index
dois_dedup.reset_index(drop=True, inplace=True)
dois_dedup
# In[26]:
# add an ID on the first run only; on later runs the ID has to be obtained by merging on DOI with the previous export
# dois_dedup['ID'] = dois_dedup.index + 1
# dois_dedup
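# A possible shape for that merge, kept as a commented sketch (the previous export file name
# 'export_dois_all_dedup_1.tsv' is assumed here, not taken from this notebook):
# previous = pd.read_csv('export_dois_all_dedup_1.tsv', sep='\t', usecols=['DOI', 'ID'])
# dois_dedup = dois_dedup.merge(previous, on='DOI', how='left')
# new_rows = dois_dedup['ID'].isna()
# dois_dedup.loc[new_rows, 'ID'] = [previous['ID'].max() + i for i in range(1, new_rows.sum() + 1)]
# dois_dedup['ID'] = dois_dedup['ID'].astype(int)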
# In[27]:
# export
dois_dedup.to_csv('export_dois_all_dedup_2.tsv', sep='\t', index=False)
# ## Import unpaywall data
# In[28]:
import re
import os
import pandas as pd
import time
import datetime
import requests
from requests.exceptions import Timeout
dois_dedup = pd.read_csv('export_dois_without_unpaywall.tsv', sep='\t')
dois_dedup
# In[32]:
# import data from unpaywall
# native JSON format, one file per DOI
for index, row in dois_dedup.iterrows():
    # print progress every 100 DOIs
    if index % 100 == 0:
        print(index)
    mydoi = row['doi']
    myid = str(row['id']).zfill(10)
    # test if file exists (in case of reload after errors)
    # if os.path.exists('E:/data_sources/unpaywall/eahil_2022/' + myid + '.json'):
    #     continue
    # start from (in case of error)
    # if row['id'] < 644200:
    #     continue
    # folder number: one subfolder per 100,000 IDs
    myfolder = str(int(row['id'] / 100000) + 1)
    # 1 sec pause (not necessary: even without it we stay under 100,000 calls per day)
    # time.sleep(1)
    searchurl = 'https://api.unpaywall.org/v2/' + mydoi + '?email=pablo.iriarte@unige.ch'
    headers = {'Accept': 'application/json'}
    try:
        resp = requests.get(searchurl, headers=headers, timeout=30)
    except Timeout:
        print('ERROR TIMEOUT - ID:' + myid + ' - DOI:' + mydoi)
    else:
        # print(resp)
        if resp.status_code == 200:
            # export the raw JSON response
            with open('E:/data_sources/unpaywall/eahil_2022/eahil_2022_' + myfolder + '/' + myid + '.json', 'w', encoding='utf-8') as f:
                f.write(resp.text)
        else:
            print('ERROR - ID:' + myid + ' - DOI:' + mydoi)
# In[ ]:
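# Sketch of a possible next step: read the saved JSON files back and collect each DOI's
# OA status. The folder layout follows the download loop above; the 'doi', 'is_oa' and
# 'oa_status' fields follow the Unpaywall data format linked at the top. The function
# name collect_oa_status is illustrative, not part of the study's pipeline.
import json
import glob

def collect_oa_status(base_folder='E:/data_sources/unpaywall/eahil_2022'):
    """Return one row per downloaded DOI with its Unpaywall OA status."""
    rows = []
    for path in glob.glob(os.path.join(base_folder, 'eahil_2022_*', '*.json')):
        with open(path, encoding='utf-8') as f:
            record = json.load(f)
        rows.append({'id': int(os.path.basename(path).replace('.json', '')),
                     'doi': record.get('doi'),
                     'is_oa': record.get('is_oa'),
                     'oa_status': record.get('oa_status')})
    return pd.DataFrame(rows)

# e.g. unpaywall_status = collect_oa_status()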