@article{rosenblatt_perceptron_1958,
title = {The perceptron: A probabilistic model for information storage and organization in the brain},
volume = {65},
issn = {1939-1471},
doi = {10.1037/h0042519},
shorttitle = {The perceptron},
abstract = {To answer the questions of how information about the physical world is sensed, in what form is information remembered, and how does information retained in memory influence recognition and behavior, a theory is developed for a hypothetical nervous system called a perceptron. The theory serves as a bridge between biophysics and psychology. It is possible to predict learning curves from neurological variables and vice versa. The quantitative statistical approach is fruitful in the understanding of the organization of cognitive systems. 18 references. ({PsycINFO} Database Record (c) 2016 {APA}, all rights reserved)},
pages = {386--408},
number = {6},
journaltitle = {Psychological Review},
author = {Rosenblatt, F.},
date = {1958},
note = {Place: {US}
Publisher: American Psychological Association},
keywords = {Brain, Cognition, Memory, Nervous System},
file = {Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\TYXLLMJD\\1959-09865-001.html:text/html},
}
@article{szandala_review_2021,
title = {Review and Comparison of Commonly Used Activation Functions for Deep Neural Networks},
volume = {903},
url = {http://arxiv.org/abs/2010.09458},
doi = {10.1007/978-981-15-5495-7},
abstract = {The primary neural networks decision-making units are activation functions. Moreover, they evaluate the output of networks neural node; thus, they are essential for the performance of the whole network. Hence, it is critical to choose the most appropriate activation function in neural networks calculation. Acharya et al. (2018) suggest that numerous recipes have been formulated over the years, though some of them are considered deprecated these days since they are unable to operate properly under some conditions. These functions have a variety of characteristics, which are deemed essential to successfully learning. Their monotonicity, individual derivatives, and finite of their range are some of these characteristics (Bach 2017). This research paper will evaluate the commonly used additive functions, such as swish, {ReLU}, Sigmoid, and so forth. This will be followed by their properties, own cons and pros, and particular formula application recommendations.},
journaltitle = {{arXiv}:2010.09458 [cs]},
author = {Szandała, Tomasz},
urldate = {2021-11-23},
date = {2021},
eprinttype = {arxiv},
eprint = {2010.09458},
keywords = {Computer Science - Machine Learning, Computer Science - Neural and Evolutionary Computing},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\2CQ9WG4Y\\Szandała - 2021 - Review and Comparison of Commonly Used Activation .pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\8UXJX9ZU\\2010.html:text/html},
}
@book{nielsen_quantum_2010,
title = {Quantum computation and quantum information},
isbn = {978-1-107-00217-3},
url = {http://archive.org/details/quantumcomputati00niel_993},
abstract = {One of the most cited books in physics of all time, Quantum Computation and Quantum Information remains the best textbook in this exciting field of science. This 10th anniversary edition includes an introduction from the authors setting the work in context. This comprehensive textbook describes such remarkable effects as fast quantum algorithms, quantum teleportation, quantum cryptography and quantum error-correction. Quantum mechanics and computer science are introduced before moving on to describe what a quantum computer is, how it can be used to solve problems faster than classical computers and its real-world implementation. It concludes with an in-depth treatment of quantum information. Containing a wealth of figures and exercises, this well-known textbook is ideal for courses on the subject, and will interest beginning graduate students and researchers in physics, computer science, mathematics, and electrical engineering.; Issue: 2011-03; Includes bibliographical references (p. [649]-664) and index},
pagetotal = {710},
location = {Cambridge ; New York},
publisher = {Cambridge University Press},
author = {Nielsen, Michael A. and Chuang, Isaac L.},
urldate = {2021-11-23},
date = {2010},
keywords = {Quantum computers},
}
@article{lstm,
author = {Hochreiter, Sepp and Schmidhuber, Jürgen},
year = {1997},
month = {12},
pages = {1735--1780},
title = {Long Short-term Memory},
volume = {9},
journal = {Neural computation},
doi = {10.1162/neco.1997.9.8.1735}
}
@article{fankhauser_multiple_2021,
title = {Multiple Query Optimization using a Hybrid Approach of Classical and Quantum Computing},
url = {http://arxiv.org/abs/2107.10508},
abstract = {Quantum computing promises to solve difficult optimization problems in chemistry, physics and mathematics more efficiently than classical computers, but requires fault-tolerant quantum computers with millions of qubits. To overcome errors introduced by today's quantum computers, hybrid algorithms combining classical and quantum computers are used. In this paper we tackle the multiple query optimization problem ({MQO}) which is an important {NP}-hard problem in the area of data-intensive problems. We propose a novel hybrid classical-quantum algorithm to solve the {MQO} on a gate-based quantum computer. We perform a detailed experimental evaluation of our algorithm and compare its performance against a competing approach that employs a quantum annealer -- another type of quantum computer. Our experimental results demonstrate that our algorithm currently can only handle small problem sizes due to the limited number of qubits available on a gate-based quantum computer compared to a quantum computer based on quantum annealing. However, our algorithm shows a qubit efficiency of close to 99\% which is almost a factor of 2 higher compared to the state of the art implementation. Finally, we analyze how our algorithm scales with larger problem sizes and conclude that our approach shows promising results for near-term quantum computers.},
journaltitle = {{arXiv}:2107.10508 [quant-ph]},
author = {Fankhauser, Tobias and Solèr, Marc E. and Füchslin, Rudolf M. and Stockinger, Kurt},
urldate = {2021-11-26},
date = {2021-07-22},
eprinttype = {arxiv},
eprint = {2107.10508},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Databases, Quantum Physics},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\FA6ILCPS\\Fankhauser et al. - 2021 - Multiple Query Optimization using a Hybrid Approac.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\NCRXEQ2X\\2107.html:text/html},
}
@article{schuld_evaluating_2019,
title = {Evaluating analytic gradients on quantum hardware},
volume = {99},
issn = {2469-9926, 2469-9934},
url = {http://arxiv.org/abs/1811.11184},
doi = {10.1103/PhysRevA.99.032331},
abstract = {An important application for near-term quantum computing lies in optimization tasks, with applications ranging from quantum chemistry and drug discovery to machine learning. In many settings --- most prominently in so-called parametrized or variational algorithms --- the objective function is a result of hybrid quantum-classical processing. To optimize the objective, it is useful to have access to exact gradients of quantum circuits with respect to gate parameters. This paper shows how gradients of expectation values of quantum measurements can be estimated using the same, or almost the same, architecture that executes the original circuit. It generalizes previous results for qubit-based platforms, and proposes recipes for the computation of gradients of continuous-variable circuits. Interestingly, in many important instances it is sufficient to run the original quantum circuit twice while shifting a single gate parameter to obtain the corresponding component of the gradient. More general cases can be solved by conditioning a single gate on an ancilla.},
pages = {032331},
number = {3},
journaltitle = {Physical Review A},
shortjournal = {Phys. Rev. A},
author = {Schuld, Maria and Bergholm, Ville and Gogolin, Christian and Izaac, Josh and Killoran, Nathan},
urldate = {2021-12-11},
date = {2019-03-21},
eprinttype = {arxiv},
eprint = {1811.11184},
keywords = {Quantum Physics},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\JEWEI4QQ\\Schuld et al. - 2019 - Evaluating analytic gradients on quantum hardware.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\8UQEW7QB\\1811.html:text/html},
}
%------Feynman Lectures-------%
@unpublished{feynman_feynman_1965,
title = {The Feynman Lectures on Physics Vol. {III} Ch. 12: The Hyperfine Splitting in Hydrogen},
url = {https://www.feynmanlectures.caltech.edu/III_12.html},
shorttitle = {The Hyperfine Splitting in Hydrogen},
abstract = {In this chapter we take up the “hyperfine splitting” of hydrogen, because it is a physically interesting example of what we can already do with quantum mechanics. It’s an example with more than two states, and it will be illustrative of the methods of quantum mechanics as applied to slightly more complicated problems.},
author = {Feynman, Richard P.},
date = {1965},
}
%------quantum data encoding/embedding citations--------%
@misc{leymann2019pattern,
title={Towards a Pattern Language for Quantum Algorithms},
author={Frank Leymann},
year={2019},
eprint={1906.03082},
archivePrefix={arXiv},
primaryClass={quant-ph}
}
@misc{schuld2021supervised,
title={Supervised quantum machine learning models are kernel methods},
author={Maria Schuld},
year={2021},
eprint={2101.11020},
archivePrefix={arXiv},
primaryClass={quant-ph}
}
@article{Quantum_machine_learning_in_feature_Hilbert_spaces_2019,
title={Quantum Machine Learning in Feature Hilbert Spaces},
volume={122},
ISSN={1079-7114},
url={http://dx.doi.org/10.1103/PhysRevLett.122.040504},
DOI={10.1103/physrevlett.122.040504},
number={4},
journal={Physical Review Letters},
publisher={American Physical Society (APS)},
author={Schuld, Maria and Killoran, Nathan},
year={2019},
month={Feb}
}
@article{Supervised_learning_with_quantum-enhanced_feature_spaces_2019,
title={Supervised learning with quantum-enhanced feature spaces},
volume={567},
ISSN={1476-4687},
url={http://dx.doi.org/10.1038/s41586-019-0980-2},
DOI={10.1038/s41586-019-0980-2},
number={7747},
journal={Nature},
publisher={Springer Science and Business Media LLC},
author={Havlíček, Vojtěch and Córcoles, Antonio D. and Temme, Kristan and Harrow, Aram W. and Kandala, Abhinav and Chow, Jerry M. and Gambetta, Jay M.},
year={2019},
month={Mar},
pages={209–212}
}
@misc{Quantum_embeddings_for_machine_learning_2020,
title={Quantum embeddings for machine learning},
author={Seth Lloyd and Maria Schuld and Aroosa Ijaz and Josh Izaac and Nathan Killoran},
year={2020},
eprint={2001.03622},
archivePrefix={arXiv},
primaryClass={quant-ph}
}
@inproceedings{Weigold2021_ExpandingDataEncodingPatterns,
author = {Weigold, Manuela and Barzen, Johanna and Leymann, Frank and Salm, Marie},
title = {Expanding Data Encoding Patterns For Quantum Algorithms},
booktitle = {2021 IEEE 18th International Conference on Software Architecture Companion (ICSA-C)},
year = {2021},
pages = {95--101},
doi = {10.1109/ICSA-C52384.2021.00025},
publisher = {IEEE}
}
@article{Weigold2021_EncodingPatternsForQuantumAlgorithms,
author = {Weigold, Manuela and Barzen, Johanna and Leymann, Frank and Salm, Marie},
title = {Encoding patterns for quantum algorithms},
journal = {IET Quantum Communication},
volume = {2},
number = {4},
pages = {141-152},
keywords = {computational complexity, quantum computing techniques, quantum computing},
doi = {10.1049/qtc2.12032},
url = {https://ietresearch.onlinelibrary.wiley.com/doi/abs/10.1049/qtc2.12032},
eprint = {https://ietresearch.onlinelibrary.wiley.com/doi/pdf/10.1049/qtc2.12032},
abstract = {Abstract As quantum computers are based on the laws of quantum mechanics, they are capable of solving certain problems faster than their classical counterparts. However, quantum algorithms with a theoretical speed-up often assume that data can be loaded efficiently. In general, the runtime complexity of the loading routine depends on (i) the data encoding that defines how the data is represented by the state of the quantum computer and (ii) the data itself. In some cases, loading the data requires at least exponential time that destroys a potential speed-up. And especially for the first generation of devices that are currently available, the resources (qubits and operations) needed to encode the data are limited. In this work, we, therefore, present six patterns that describe how data is handled by quantum computers.},
year = {2021}
}
@article{araujoDivideandconquerAlgorithmQuantum2021,
title = {A Divide-and-Conquer Algorithm for Quantum State Preparation},
author = {Araujo, Israel F. and Park, Daniel K. and Petruccione, Francesco and {da Silva}, Adenilton J.},
year = {2021},
month = mar,
journal = {Scientific Reports},
volume = {11},
number = {1},
pages = {6329},
publisher = {{Nature Publishing Group}},
issn = {2045-2322},
doi = {10.1038/s41598-021-85474-1},
abstract = {Advantages in several fields of research and industry are expected with the rise of quantum computers. However, the computational cost to load classical data in quantum computers can impose restrictions on possible quantum speedups. Known algorithms to create arbitrary quantum states require quantum circuits with depth O(N) to load an N-dimensional vector. Here, we show that it is possible to load an N-dimensional vector with exponential time advantage using a quantum circuit with polylogarithmic depth and entangled information in ancillary qubits. Results show that we can efficiently load data in quantum devices using a divide-and-conquer strategy to exchange computational time for space. We demonstrate a proof of concept on a real quantum device and present two applications for quantum machine learning. We expect that this new loading strategy allows the quantum speedup of tasks that require to load a significant volume of information to quantum devices.},
copyright = {2021 The Author(s)},
langid = {english},
keywords = {Computer science,Information technology,Quantum information,Qubits},
file = {/Users/phuber/Zotero/storage/847FDBNU/Araujo et al. - 2021 - A divide-and-conquer algorithm for quantum state p.pdf;/Users/phuber/Zotero/storage/HM8H2NJH/s41598-021-85474-1.html}
}
@article{leymannBitterTruthGatebased2020,
title = {The Bitter Truth about Gate-Based Quantum Algorithms in the {{NISQ}} Era},
author = {Leymann, Frank and Barzen, Johanna},
year = {2020},
month = oct,
journal = {Quantum Science and Technology},
volume = {5},
number = {4},
pages = {044007},
issn = {2058-9565},
doi = {10.1088/2058-9565/abae7d},
abstract = {Abstract Implementing a gate-based quantum algorithm on an noisy intermediate scale quantum (NISQ) device has several challenges that arise from the fact that such devices are noisy and have limited quantum resources. Thus, various factors contributing to the depth and width as well as to the noise of an implementation of a gate-based algorithm must be understood in order to assess whether an implementation will execute successfully on a given NISQ device. In this contribution, we discuss these factors and their impact on algorithm implementations. Especially, we will cover state preparation, oracle expansion, connectivity, circuit rewriting, and readout: these factors are very often ignored when presenting a gate-based algorithm but they are crucial when implementing such an algorithm on near-term quantum computers. Our contribution will help developers in charge of realizing gate-based algorithms on such machines in (i) achieving an executable implementation, and (ii) assessing the success of their implementation on a given machine.},
file = {/Users/phuber/Zotero/storage/FIB63QET/Leymann und Barzen - 2020 - The bitter truth about gate-based quantum algorith.pdf}
}
@article{PoincarDataPreprocessinForQuantumMachineLearning_2021,
author = {Sierra-Sosa, Daniel and Arcila-Moreno, Juan and Zapirain, Begoña and Elmaghraby, Adel},
year = {2021},
month = {02},
pages = {1849-1861},
title = {Diabetes Type 2: Poincaré Data Preprocessing for Quantum Machine Learning},
volume = {67},
journal = {Computers, Materials and Continua},
doi = {10.32604/cmc.2021.013196}
}
@article{SHRIVASTAVA20201849,
title = {Classical Equivalent Quantum Unsupervised Learning Algorithms},
author = {Shrivastava, Prakhar and Soni, Kapil Kumar and Rasool, Akhtar},
year = {2020},
journal = {Procedia Computer Science},
volume = {167},
pages = {1849--1860},
issn = {1877-0509},
doi = {10.1016/j.procs.2020.03.204},
abstract = {The paper presents necessity of data pre-processing to process the data set through machine learning algorithms, and it filters the raw data to make the data as in compatible format for analysis purpose. The methods such as mean normalization, feature scaling and dimensionality reduction are used for data smoothing and therefore formatted data set gets processed by learning algorithms to predict best possible outcome as per the analysis. Machine learning became famous for such processing purpose and through unsupervised learning we are capable of processing over various data formats. Such algorithms performance is up-to the mark, but as per the availability of inherent parallel processing through quantum machines, computational speedup can be achieved by designing classical equivalent quantum machine learning algorithms. In this paper, we discussed some of the classical unsupervised learning algorithms, and then we propose the equivalent quantum version of algorithms along with the mathematical justification over the complexity analysis and achieved computational speedup and show the betterment by processing such problems over quantum machines.},
keywords = {Data Mining,Data Preprocessing,Grover’s Search,Machine Learning,QRAM,Superposition,Unsupervised Learning: Quantum Computing}
}
%%% Datasets
@article{fisher_use_1936,
title = {The Use of Multiple Measurements in Taxonomic Problems},
volume = {7},
issn = {2050-1439},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1469-1809.1936.tb02137.x},
doi = {10.1111/j.1469-1809.1936.tb02137.x},
abstract = {The articles published by the Annals of Eugenics (1925–1954) have been made available online as an historical archive intended for scholarly use. The work of eugenicists was often pervaded by prejudice against racial, ethnic and disabled groups. The online publication of this material for scholarly research purposes is not an endorsement of those views nor a promotion of eugenics in any way.},
pages = {179--188},
number = {2},
journaltitle = {Annals of Eugenics},
author = {Fisher, R. A.},
urldate = {2021-12-14},
date = {1936},
langid = {english},
note = {\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/j.1469-1809.1936.tb02137.x},
file = {Full Text PDF:C\:\\Users\\ricar\\Zotero\\storage\\4TRPG8HP\\Fisher - 1936 - The Use of Multiple Measurements in Taxonomic Prob.pdf:application/pdf;Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\X4W658DS\\j.1469-1809.1936.tb02137.html:text/html},
}
@article{ahmad_survival_2017,
title = {Survival analysis of heart failure patients: A case study},
volume = {12},
issn = {1932-6203},
url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0181001},
doi = {10.1371/journal.pone.0181001},
shorttitle = {Survival analysis of heart failure patients},
abstract = {This study was focused on survival analysis of heart failure patients who were admitted to Institute of Cardiology and Allied hospital Faisalabad-Pakistan during April-December (2015). All the patients were aged 40 years or above, having left ventricular systolic dysfunction, belonging to {NYHA} class {III} and {IV}. Cox regression was used to model mortality considering age, ejection fraction, serum creatinine, serum sodium, anemia, platelets, creatinine phosphokinase, blood pressure, gender, diabetes and smoking status as potentially contributing for mortality. Kaplan Meier plot was used to study the general pattern of survival which showed high intensity of mortality in the initial days and then a gradual increase up to the end of study. Martingale residuals were used to assess functional form of variables. Results were validated computing calibration slope and discrimination ability of model via bootstrapping. For graphical prediction of survival probability, a nomogram was constructed. Age, renal dysfunction, blood pressure, ejection fraction and anemia were found as significant risk factors for mortality among heart failure patients.},
pages = {e0181001},
number = {7},
journaltitle = {{PLOS} {ONE}},
shortjournal = {{PLOS} {ONE}},
author = {Ahmad, Tanvir and Munir, Assia and Bhatti, Sajjad Haider and Aftab, Muhammad and Raza, Muhammad Ali},
urldate = {2021-12-18},
date = {2017-07-20},
langid = {english},
note = {Publisher: Public Library of Science},
keywords = {Anemia, Blood pressure, Creatinine, Diabetes mellitus, Ejection fraction, Heart, Heart failure, Platelets},
file = {Full Text PDF:C\:\\Users\\ricar\\Zotero\\storage\\K2QIFDMX\\Ahmad et al. - 2017 - Survival analysis of heart failure patients A cas.pdf:application/pdf;Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\X63XB966\\article.html:text/html},
}
@article{sim_expressibility_2019,
title = {Expressibility and entangling capability of parameterized quantum circuits for hybrid quantum-classical algorithms},
volume = {2},
issn = {2511-9044, 2511-9044},
url = {http://arxiv.org/abs/1905.10876},
doi = {10.1002/qute.201900070},
abstract = {Parameterized quantum circuits play an essential role in the performance of many variational hybrid quantum-classical ({HQC}) algorithms. One challenge in implementing such algorithms is to choose an effective circuit that well represents the solution space while maintaining a low circuit depth and number of parameters. To characterize and identify expressible, yet compact, parameterized circuits, we propose several descriptors, including measures of expressibility and entangling capability, that can be statistically estimated from classical simulations of parameterized quantum circuits. We compute these descriptors for different circuit structures, varying the qubit connectivity and selection of gates. From our simulations, we identify circuit fragments that perform well with respect to the descriptors. In particular, we quantify the substantial improvement in performance of two-qubit gates in a ring or all-to-all connected arrangement compared to that of those on a line. Furthermore, we quantify the improvement in expressibility and entangling capability achieved by sequences of controlled X-rotation gates compared to sequences of controlled Z-rotation gates. In addition, we investigate how expressibility "saturates" with increased circuit depth, finding that the rate and saturated-value appear to be distinguishing features of a parameterized quantum circuit template. While the correlation between each descriptor and performance of an algorithm remains to be investigated, methods and results from this study can be useful for both algorithm development and design of experiments for general variational {HQC} algorithms.},
pages = {1900070},
number = {12},
journaltitle = {Advanced Quantum Technologies},
shortjournal = {Adv Quantum Tech},
author = {Sim, Sukin and Johnson, Peter D. and Aspuru-Guzik, Alan},
urldate = {2021-12-18},
date = {2019-12},
eprinttype = {arxiv},
eprint = {1905.10876},
keywords = {Quantum Physics},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\MBBH9XUE\\Sim et al. - 2019 - Expressibility and entangling capability of parame.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\QZF7HM8I\\1905.html:text/html},
}
@incollection{scherer_quantum_2019,
location = {Cham},
title = {Quantum Gates and Circuits for Elementary Calculations},
isbn = {978-3-030-12358-1},
url = {https://doi.org/10.1007/978-3-030-12358-1_5},
abstract = {This chapter begins with a brief review of classical logical gates and a proof of the universality of the Toffoli gate for classical gates. This is followed by the presentation of a few unary and binary quantum gates and by a proof of the universality of phase-multiplication, spin-rotation and controlled {NOT} for quantum gates. Furthermore, a few general aspects of quantum algorithms are considered, before we finally present quantum circuits for elementary arithmetical operations like addition, addition modulo N and multiplication modulo N as well as for the quantum Fourier transform.},
pages = {161--246},
booktitle = {Mathematics of Quantum Computing: An Introduction},
publisher = {Springer International Publishing},
author = {Scherer, Wolfgang},
editor = {Scherer, Wolfgang},
urldate = {2021-12-20},
date = {2019},
langid = {english},
doi = {10.1007/978-3-030-12358-1_5},
}
@article{shaib_efficient_2021,
title = {Efficient Noise Mitigation Technique for Quantum Computing},
url = {http://arxiv.org/abs/2109.05136},
abstract = {Quantum computers have enabled solving problems beyond the current computers' capabilities. However, this requires handling noise arising from unwanted interactions in these systems. Several protocols have been proposed to address efficient and accurate quantum noise profiling and mitigation. In this work, we propose a novel protocol that efficiently estimates the average output of a noisy quantum device to be used for quantum noise mitigation. The multi-qubit system average behavior is approximated as a special form of a Pauli Channel where Clifford gates are used to estimate the average output for circuits of different depths. The characterized Pauli channel error rates, and state preparation and measurement errors are then used to construct the outputs for different depths thereby eliminating the need for large simulations and enabling efficient mitigation. We demonstrate the efficiency of the proposed protocol on four {IBM} Q 5-qubit quantum devices. Our method demonstrates improved accuracy with efficient noise characterization. We report up to 88\% and 69\% improvement for the proposed approach compared to the unmitigated, and pure measurement error mitigation approaches, respectively.},
journaltitle = {{arXiv}:2109.05136 [quant-ph]},
author = {Shaib, Ali and Naim, Mohamad H. and Fouda, Mohammed E. and Kanj, Rouwaida and Kurdahi, Fadi},
urldate = {2021-12-23},
date = {2021-09-10},
eprinttype = {arxiv},
eprint = {2109.05136},
keywords = {Computer Science - Information Theory, Electrical Engineering and Systems Science - Signal Processing, Quantum Physics},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\JRT4G4UX\\Shaib et al. - 2021 - Efficient Noise Mitigation Technique for Quantum C.pdf:application/pdf},
}
@article{georgopoulos_modelling_2021,
title = {Modelling and Simulating the Noisy Behaviour of Near-term Quantum Computers},
volume = {104},
issn = {2469-9926, 2469-9934},
url = {http://arxiv.org/abs/2101.02109},
doi = {10.1103/PhysRevA.104.062432},
abstract = {Noise dominates every aspect of near-term quantum computers, rendering it exceedingly difficult to carry out even small computations. In this paper we are concerned with the modelling of noise in Noisy Intermediate-Scale Quantum ({NISQ}) computers. We focus on three error groups that represent the main sources of noise during a computation and present quantum channels that model each source. We engineer a noise model that combines all three noise channels and simulates the evolution of the quantum computer using its calibrated error rates. We run various experiments of our model, showcasing its behaviour compared to other noise models and an {IBM} quantum computer. We find that our model provides a better approximation of the quantum computer's behaviour than the other models. Following this, we use a genetic algorithm to optimize the parameters used by our noise model, bringing the behaviour of the model even closer to the quantum computer. Finally, a comparison between the pre and postoptimization parameters reveals that, according to our model, certain operations can be more or less erroneous than the hardware-calibrated parameters show.},
pages = {062432},
number = {6},
journaltitle = {Physical Review A},
shortjournal = {Phys. Rev. A},
author = {Georgopoulos, Konstantinos and Emary, Clive and Zuliani, Paolo},
urldate = {2021-12-23},
date = {2021-12-17},
eprinttype = {arxiv},
eprint = {2101.02109},
keywords = {Quantum Physics},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\FSJ3Q8ZM\\Georgopoulos et al. - 2021 - Modelling and Simulating the Noisy Behaviour of Ne.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\5IMCWS5W\\2101.html:text/html},
}
@article{havlicek_supervised_2019,
title = {Supervised learning with quantum enhanced feature spaces},
volume = {567},
issn = {0028-0836, 1476-4687},
url = {http://arxiv.org/abs/1804.11326},
doi = {10.1038/s41586-019-0980-2},
abstract = {Machine learning and quantum computing are two technologies each with the potential for altering how computation is performed to address previously untenable problems. Kernel methods for machine learning are ubiquitous for pattern recognition, with support vector machines ({SVMs}) being the most well-known method for classification problems. However, there are limitations to the successful solution to such problems when the feature space becomes large, and the kernel functions become computationally expensive to estimate. A core element to computational speed-ups afforded by quantum algorithms is the exploitation of an exponentially large quantum state space through controllable entanglement and interference. Here, we propose and experimentally implement two novel methods on a superconducting processor. Both methods represent the feature space of a classification problem by a quantum state, taking advantage of the large dimensionality of quantum Hilbert space to obtain an enhanced solution. One method, the quantum variational classifier builds on [1,2] and operates through using a variational quantum circuit to classify a training set in direct analogy to conventional {SVMs}. In the second, a quantum kernel estimator, we estimate the kernel function and optimize the classifier directly. The two methods present a new class of tools for exploring the applications of noisy intermediate scale quantum computers [3] to machine learning.},
pages = {209--212},
number = {7747},
journaltitle = {Nature},
shortjournal = {Nature},
author = {Havlicek, Vojtech and Córcoles, Antonio D. and Temme, Kristan and Harrow, Aram W. and Kandala, Abhinav and Chow, Jerry M. and Gambetta, Jay M.},
urldate = {2021-12-18},
date = {2019-03},
eprinttype = {arxiv},
eprint = {1804.11326},
keywords = {Quantum Physics, Statistics - Machine Learning},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\YJ6YTJVZ\\Havlicek et al. - 2019 - Supervised learning with quantum enhanced feature .pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\XRPQ27E2\\1804.html:text/html},
}
@online{sakhiya_heart_nodate,
title = {Heart Fail: Analysis and Quick-prediction},
url = {https://kaggle.com/nayansakhiya/heart-fail-analysis-and-quick-prediction},
author = {Nayan Sakhiya},
shorttitle = {Heart Fail},
abstract = {Explore and run machine learning code with Kaggle Notebooks {\textbar} Using data from Heart Failure Prediction},
urldate = {2021-12-18},
langid = {english},
file = {Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\I9QLW5HE\\heart-fail-analysis-and-quick-prediction.html:text/html},
}
@article{bloch_nuclear_induction,
title = {Nuclear Induction},
author = {Bloch, F.},
journal = {Phys. Rev.},
volume = {70},
issue = {7-8},
pages = {460--474},
numpages = {0},
year = {1946},
month = {Oct},
publisher = {American Physical Society},
doi = {10.1103/PhysRev.70.460},
url = {https://link.aps.org/doi/10.1103/PhysRev.70.460}
}
@book{lars_complex_1978,
location = {New York},
edition = {3},
title = {Complex analysis},
isbn = {0-07-000657-1},
series = {International Series in Pure and Applied Mathematics},
abstract = {An introduction to the theory of analytic functions of one complex variable.},
publisher = {{McGraw}-Hill Book Co.},
author = {Ahlfors, Lars},
date = {1978},
}
@book{marshall_c_methods_1964,
title = {Methods of Matrix Algebra},
isbn = {978-0-08-095522-3},
url = {https://www.elsevier.com/books/methods-of-matrix-algebra/pease/978-0-12-548850-1},
pagetotal = {405},
publisher = {Academic Press},
author = {Pease, III, Marshall C.},
date = {1964-01-01},
}
@article{baischer_learning_2021,
title = {Learning on Hardware: A Tutorial on Neural Network Accelerators and Co-Processors},
url = {http://arxiv.org/abs/2104.09252},
shorttitle = {Learning on Hardware},
abstract = {Deep neural networks ({DNNs}) have the advantage that they can take into account a large number of parameters, which enables them to solve complex tasks. In computer vision and speech recognition, they have a better accuracy than common algorithms, and in some tasks, they boast an even higher accuracy than human experts. With the progress of {DNNs} in recent years, many other fields of application such as diagnosis of diseases and autonomous driving are taking advantage of them. The trend at {DNNs} is clear: The network size is growing exponentially, which leads to an exponential increase in computational effort and required memory size. For this reason, optimized hardware accelerators are used to increase the performance of the inference of neuronal networks. However, there are various neural network hardware accelerator platforms, such as graphics processing units ({GPUs}), application specific integrated circuits ({ASICs}) and field programmable gate arrays ({FPGAs}). Each of these platforms offer certain advantages and disadvantages. Also, there are various methods for reducing the computational effort of {DNNs}, which are differently suitable for each hardware accelerator. In this article an overview of existing neural network hardware accelerators and acceleration methods is given. Their strengths and weaknesses are shown and a recommendation of suitable applications is given. In particular, we focus on acceleration of the inference of convolutional neural networks ({CNNs}) used for image recognition tasks. Given that there exist many different hardware architectures. {FPGA}-based implementations are well-suited to show the effect of {DNN} optimization methods on accuracy and throughput. For this reason, the focus of this work is more on {FPGA}-based implementations.},
journaltitle = {{arXiv}:2104.09252 [cs]},
author = {Baischer, Lukas and Wess, Matthias and {TaheriNejad}, Nima},
urldate = {2021-12-23},
date = {2021-04-19},
eprinttype = {arxiv},
eprint = {2104.09252},
keywords = {Computer Science - Hardware Architecture, Computer Science - Machine Learning, Computer Science - Neural and Evolutionary Computing},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\QVWGLZIG\\Baischer et al. - 2021 - Learning on Hardware A Tutorial on Neural Network.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\5LSGYHRB\\2104.html:text/html},
}
%% web articles
@online{PennyLane_QuantumEmbedding,
title = {Quantum embedding — {PennyLane}},
url = {https://pennylane.ai/qml/glossary/quantum_embedding.html},
urldate = {2021-12-03},
file = {Snapshot:/Users/phuber/Zotero/storage/VB95DBM8/quantum_embedding.html:text/html},
}
@online{PennyLane_QuantumFeatureMap,
title = {Quantum Feature Map — {PennyLane}},
url = {https://pennylane.ai/qml/glossary/quantum_feature_map.html},
author = {Xanadu},
urldate = {2021-12-18},
file = {Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\58S59TGB\\quantum_feature_map.html:text/html},
}
@online{VariationalClassifierPennyLane,
title = {Variational classifier — {PennyLane}},
url = {https://pennylane.ai/qml/demos/tutorial_variational_classifier.html},
abstract = {Using PennyLane to implement quantum circuits that can be trained from labelled data to classify new data samples.},
urldate = {2021-12-12},
file = {Snapshot:/Users/phuber/Zotero/storage/8QNUDNUH/tutorial_variational_classifier.html:text/html},
}
@article{farhi_quantum_2014,
title = {A Quantum Approximate Optimization Algorithm},
url = {http://arxiv.org/abs/1411.4028},
abstract = {We introduce a quantum algorithm that produces approximate solutions for combinatorial optimization problems. The algorithm depends on a positive integer p and the quality of the approximation improves as p is increased. The quantum circuit that implements the algorithm consists of unitary gates whose locality is at most the locality of the objective function whose optimum is sought. The depth of the circuit grows linearly with p times (at worst) the number of constraints. If p is fixed, that is, independent of the input size, the algorithm makes use of efficient classical preprocessing. If p grows with the input size a different strategy is proposed. We study the algorithm as applied to {MaxCut} on regular graphs and analyze its performance on 2-regular and 3-regular graphs for fixed p. For p = 1, on 3-regular graphs the quantum algorithm always finds a cut that is at least 0.6924 times the size of the optimal cut.},
journaltitle = {{arXiv}:1411.4028 [quant-ph]},
author = {Farhi, Edward and Goldstone, Jeffrey and Gutmann, Sam},
urldate = {2021-12-20},
date = {2014-11-14},
eprinttype = {arxiv},
eprint = {1411.4028},
keywords = {Quantum Physics},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\FXMNLIL4\\Farhi et al. - 2014 - A Quantum Approximate Optimization Algorithm.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\4MD4AXTB\\1411.html:text/html},
}
@online{openai_ai_2018,
title = {{AI} and Compute},
url = {https://openai.com/blog/ai-and-compute/},
abstract = {We’re releasing an analysis showing that since 2012, the amount of compute used in the largest {AI} training runs has been increasing exponentially with a 3.4-month doubling time (by comparison, Moore’s Law had a 2-year doubling period).},
titleaddon = {{OpenAI}},
author = {{OpenAI}},
urldate = {2021-12-20},
date = {2018-05-16},
langid = {english},
file = {Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\GP63FCCP\\ai-and-compute.html:text/html},
}
@article{shor_polynomial-time_1997,
title = {Polynomial-Time Algorithms for Prime Factorization and Discrete Logarithms on a Quantum Computer},
volume = {26},
issn = {0097-5397, 1095-7111},
url = {http://arxiv.org/abs/quant-ph/9508027},
doi = {10.1137/S0097539795293172},
abstract = {A digital computer is generally believed to be an efficient universal computing device; that is, it is believed able to simulate any physical computing device with an increase in computation time of at most a polynomial factor. This may not be true when quantum mechanics is taken into consideration. This paper considers factoring integers and finding discrete logarithms, two problems which are generally thought to be hard on a classical computer and have been used as the basis of several proposed cryptosystems. Efficient randomized algorithms are given for these two problems on a hypothetical quantum computer. These algorithms take a number of steps polynomial in the input size, e.g., the number of digits of the integer to be factored.},
pages = {1484--1509},
number = {5},
journaltitle = {{SIAM} Journal on Computing},
shortjournal = {{SIAM} J. Comput.},
author = {Shor, Peter W.},
urldate = {2021-12-20},
date = {1997-10},
eprinttype = {arxiv},
eprint = {quant-ph/9508027},
keywords = {Quantum Physics},
file = {arXiv Fulltext PDF:C\:\\Users\\ricar\\Zotero\\storage\\Y3Z8Z5MP\\Shor - 1997 - Polynomial-Time Algorithms for Prime Factorization.pdf:application/pdf;arXiv.org Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\556WZCTP\\9508027.html:text/html},
}
%-----scikit citations---------%
@online{scikit_sklearnpreprocessingminmaxscaler_nodate,
title = {sklearn.preprocessing.{MinMaxScaler}},
author = {scikit-learn developers},
url = {https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html},
abstract = {Examples using sklearn.preprocessing.{MinMaxScaler}: Release Highlights for scikit-learn 0.24 Release Highlights for scikit-learn 0.24, Image denoising using kernel {PCA} Image denoising using kernel P...},
titleaddon = {scikit-learn},
urldate = {2021-12-20},
langid = {english},
file = {Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\AGB6JTQ5\\sklearn.preprocessing.MinMaxScaler.html:text/html},
}
@online{scikit_sklearnpreprocessingstandardscaler_nodate,
title = {sklearn.preprocessing.{StandardScaler}},
author = {scikit-learn developers},
url = {https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html},
abstract = {Examples using sklearn.preprocessing.{StandardScaler}: Release Highlights for scikit-learn 1.0 Release Highlights for scikit-learn 1.0, Release Highlights for scikit-learn 0.23 Release Highlights for...},
titleaddon = {scikit-learn},
urldate = {2021-12-20},
langid = {english},
file = {Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\KSQM2YKU\\sklearn.preprocessing.StandardScaler.html:text/html},
}
%------qiskit citations--------%
@online{qiskit_ccxgate_nodate,
title = {{CCXGate} — Qiskit 0.32.1 documentation},
author = {Qiskit Development Team},
url = {https://qiskit.org/documentation/stubs/qiskit.circuit.library.CCXGate.html},
urldate = {2021-11-23},
file = {CCXGate — Qiskit 0.32.1 documentation:C\:\\Users\\ricar\\Zotero\\storage\\RZHACSCG\\qiskit.circuit.library.CCXGate.html:text/html},
}
@online{qiskit_xgate_nodate,
title = {{XGate} — Qiskit 0.32.1 documentation},
author = {Qiskit Development Team},
url = {https://qiskit.org/documentation/stubs/qiskit.circuit.library.XGate.html#qiskit.circuit.library.XGate},
urldate = {2021-11-23},
file = {XGate — Qiskit 0.32.1 documentation:C\:\\Users\\ricar\\Zotero\\storage\\EKY6WGD2\\qiskit.circuit.library.XGate.html:text/html},
}
@online{qiskit_hgate_nodate,
title = {{HGate} — Qiskit 0.32.1 documentation},
author = {Qiskit Development Team},
url = {https://qiskit.org/documentation/stubs/qiskit.circuit.library.HGate.html},
urldate = {2021-11-28},
file = {HGate — Qiskit 0.32.1 documentation:C\:\\Users\\ricar\\Zotero\\storage\\Y96E48X8\\qiskit.circuit.library.HGate.html:text/html},
}
@online{qiskit_rygate_nodate,
title = {{RYGate} — Qiskit 0.32.1 documentation},
author = {Qiskit Development Team},
url = {https://qiskit.org/documentation/stubs/qiskit.circuit.library.RYGate.html},
urldate = {2021-11-28},
file = {RYGate — Qiskit 0.32.1 documentation:C\:\\Users\\ricar\\Zotero\\storage\\DZLWUVMP\\qiskit.circuit.library.RYGate.html:text/html},
}
@online{qiskit_czgate_nodate,
title = {{CZGate} — Qiskit 0.32.1 documentation},
author = {Qiskit Development Team},
url = {https://qiskit.org/documentation/stubs/qiskit.circuit.library.CZGate.html},
urldate = {2021-12-05},
file = {CZGate — Qiskit 0.32.1 documentation:C\:\\Users\\ricar\\Zotero\\storage\\C2X287I3\\qiskit.circuit.library.CZGate.html:text/html},
}
@online{qiskit_cygate_nodate,
title = {{CYGate} — Qiskit 0.32.1 documentation},
author = {Qiskit Development Team},
url = {https://qiskit.org/documentation/stubs/qiskit.circuit.library.CYGate.html},
urldate = {2021-12-05},
file = {CYGate — Qiskit 0.32.1 documentation:C\:\\Users\\ricar\\Zotero\\storage\\UTTBYUJB\\qiskit.circuit.library.CYGate.html:text/html},
}
@online{qiskit_cxgate_nodate,
title = {{CXGate} — Qiskit 0.32.1 documentation},
author = {Qiskit Development Team},
url = {https://qiskit.org/documentation/stubs/qiskit.circuit.library.CXGate.html},
urldate = {2021-12-05},
file = {CXGate — Qiskit 0.32.1 documentation:C\:\\Users\\ricar\\Zotero\\storage\\UTTBYUJB\\qiskit.circuit.library.CXGate.html:text/html},
}
@online{qiskit_neural_nodate,
title = {Neural Network Classifier \& Regressor — Qiskit Machine Learning 0.2.1 documentation},
url = {https://qiskit.org/documentation/machine-learning/tutorials/02_neural_network_classifier_and_regressor.html},
author = {Qiskit Development Team},
urldate = {2021-12-11},
file = {Neural Network Classifier & Regressor — Qiskit Machine Learning 0.2.1 documentation:C\:\\Users\\ricar\\Zotero\\storage\\X9RMVIUD\\02_neural_network_classifier_and_regressor.html:text/html},
}
@online{qiskit_single_qubit_gates_nodate,
title = {Single Qubit Gates},
url = {https://community.qiskit.org/textbook/ch-states/single-qubit-gates.html},
abstract = {A university quantum algorithms/computation course supplement based on Qiskit},
author = {Qiskit Development Team},
urldate = {2021-12-21},
langid = {english},
file = {Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\RF3W4MFS\\single-qubit-gates.html:text/html},
}
@online{ibm_ibm_nodate,
title = {{IBM} Quantum},
url = {https://quantum-computing.ibm.com/services},
shorttitle = {View the availability and details of {IBM} Quantum programs, systems, and simulators.},
abstract = {Program real quantum systems with the leading quantum cloud application.},
titleaddon = {{IBM} Quantum},
author = {{IBM}},
urldate = {2021-12-26},
langid = {english},
file = {Snapshot:C\:\\Users\\ricar\\Zotero\\storage\\A5IKEH8N\\services.html:text/html},
}