From 1cb14cbd13bd94f6e9f40f2e9313bf585569e84c Mon Sep 17 00:00:00 2001
From: Daniel Elton
Date: Thu, 19 Dec 2019 18:50:47 -0500
Subject: [PATCH] add discussion in interpretability section and update section
 on molecular design

---
 content/05.treat.md       | 36 +++++++++++--------
 content/06.discussion.md  | 34 +++++++++++++-----------
 content/citation-tags.tsv |  7 ++++-
 3 files changed, 40 insertions(+), 37 deletions(-)
diff --git a/content/05.treat.md b/content/05.treat.md
index 96db7d25..3c71ba95 100644
--- a/content/05.treat.md
+++ b/content/05.treat.md
@@ -180,28 +180,24 @@ However, in the long term, atomic convolutions may ultimately overtake grid-base
 
 #### *De novo* drug design
 
-*De novo* drug design attempts to model the typical design-synthesize-test cycle of drug discovery [@doi:10.1002/wcms.49; @doi:10.1021/acs.jmedchem.5b01849].
+*De novo* drug design attempts to model the typical design-synthesize-test cycle of drug discovery *in silico* [@doi:10.1002/wcms.49; @doi:10.1021/acs.jmedchem.5b01849].
 It explores an estimated 10^60^ synthesizable organic molecules with drug-like properties without explicit enumeration [@doi:10.1002/wcms.1104].
-To test or score structures, algorithms like those discussed earlier are used.
+To test or score structures, physics-based simulations can be used, or machine learning models based on the techniques discussed earlier, which are much more computationally efficient.
 To "design" and "synthesize", traditional *de novo* design software relied on classical optimizers such as genetic algorithms.
-Unfortunately, this often leads to overfit, "weird" molecules, which are difficult to synthesize in the lab.
-Current programs have settled on rule-based virtual chemical reactions to generate molecular structures [@doi:10.1021/acs.jmedchem.5b01849].
-Deep learning models that generate realistic, synthesizable molecules have been proposed as an alternative.
-In contrast to the classical, symbolic approaches, generative models learned from data would not depend on laboriously encoded expert knowledge.
-The challenge of generating molecules has parallels to the generation of syntactically and semantically correct text [@arxiv:1308.0850].
-
-As deep learning models that directly output (molecular) graphs remain under-explored, generative neural networks for drug design typically represent chemicals with the simplified molecular-input line-entry system (SMILES), a standard string-based representation with characters that represent atoms, bonds, and rings [@tag:Segler2017_drug_design].
-This allows treating molecules as sequences and leveraging recent progress in recurrent neural networks.
-Gómez-Bombarelli et al. designed a SMILES-to-SMILES autoencoder to learn a continuous latent feature space for chemicals [@tag:Gomezb2016_automatic].
-In this learned continuous space it was possible to interpolate between continuous representations of chemicals in a manner that is not possible with discrete
-(e.g. bit vector or string) features or in symbolic, molecular graph space.
-Even more interesting is the prospect of performing gradient-based or Bayesian optimization of molecules within this latent space.
-The strategy of constructing simple, continuous features before applying supervised learning techniques is reminiscent of autoencoders trained on high-dimensional EHR data [@tag:BeaulieuJones2016_ehr_encode].
+
+In the past few years, a large number of techniques for the generative modeling and optimization of molecules with deep learning have been explored, including recurrent neural networks, variational autoencoders, generative adversarial networks, and reinforcement learning -- for a review see Elton et al. [@tag:Elton_molecular_design_review].
+
+Building on the large amount of work that has already gone into text generation [@arxiv:1308.0850], many generative neural networks for drug design represent chemicals with the simplified molecular-input line-entry system (SMILES), a standard string-based representation with characters that represent atoms, bonds, and rings [@tag:Segler2017_drug_design].
+
+The first successful demonstration of a deep learning-based approach for molecular optimization occurred in 2016 with the development of a SMILES-to-SMILES autoencoder capable of learning a continuous latent feature space for molecules [@tag:Gomezb2016_automatic].
+In this learned continuous space it is possible to interpolate between molecular structures in a manner that is not possible with discrete
+(e.g. bit vector or string) features or in symbolic, molecular graph space. Even more interesting is that one can perform gradient-based or Bayesian optimization of molecules within this latent space. The strategy of constructing simple, continuous features before applying supervised learning techniques is reminiscent of autoencoders trained on high-dimensional EHR data [@tag:BeaulieuJones2016_ehr_encode].
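To make the latent-space idea concrete, a minimal sketch of interpolation between two molecules is given below; `encode` and `decode` are hypothetical stand-ins for the two halves of a trained SMILES autoencoder, and RDKit is used only to check which decoded strings parse as valid molecules:

```python
# Illustrative sketch only: `encode`/`decode` are placeholders for a trained
# SMILES autoencoder's encoder and decoder; RDKit flags invalid decoded strings.
import numpy as np
from rdkit import Chem

def interpolate(encode, decode, smiles_a, smiles_b, steps=10):
    z_a, z_b = encode(smiles_a), encode(smiles_b)      # continuous latent vectors
    valid = []
    for t in np.linspace(0.0, 1.0, steps):
        candidate = decode((1 - t) * z_a + t * z_b)    # decoded SMILES string
        if Chem.MolFromSmiles(candidate) is not None:  # None means invalid SMILES
            valid.append(candidate)
    return valid
```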
 A drawback of the SMILES-to-SMILES autoencoder is that not all SMILES strings produced by the autoencoder's decoder correspond to valid chemical structures.
-Recently, the Grammar Variational Autoencoder, which takes the SMILES grammar into account and is guaranteed to produce syntactically valid SMILES, has been proposed to alleviate this issue [@arxiv:1703.01925].
+The Grammar Variational Autoencoder, which takes the SMILES grammar into account and is guaranteed to produce syntactically valid SMILES, helps alleviate this issue to some extent [@arxiv:1703.01925].
 Another approach to *de novo* design is to train character-based RNNs on large collections of molecules, for example, ChEMBL [@doi:10.1093/nar/gkr777], to first obtain a generic generative model for drug-like compounds [@tag:Segler2017_drug_design].
-These generative models successfully learn the grammar of compound representations, with 94% [@tag:Olivecrona2017_drug_design] or nearly 98% [@tag:Segler2017_drug_design] of generated SMILES corresponding to valid molecular structures.
-The initial RNN is then fine-tuned to generate molecules that are likely to be active against a specific target by either continuing training on a small set of positive examples [@tag:Segler2017_drug_design] or adopting reinforcement learning strategies [@tag:Olivecrona2017_drug_design; @arxiv:1611.02796].
-Both the fine-tuning and reinforcement learning approaches can rediscover known, held-out active molecules.
-The great flexibility of neural networks, and progress in generative models offers many opportunities for deep architectures in *de novo* design (e.g. the adaptation of GANs for molecules).
+These generative models successfully learn the grammar of compound representations, with 94% [@tag:Olivecrona2017_drug_design] or nearly 98% [@tag:Segler2017_drug_design] of generated SMILES corresponding to valid molecular structures. The initial RNN is then fine-tuned to generate molecules that are likely to be active against a specific target by either continuing training on a small set of positive examples [@tag:Segler2017_drug_design] or adopting reinforcement learning strategies [@tag:Olivecrona2017_drug_design; @arxiv:1611.02796]. Both the fine-tuning and reinforcement learning approaches can rediscover known, held-out active molecules.
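The fine-tuning loop itself can be quite simple; the following hedged sketch assumes a pretrained character-level SMILES language model `model` and a tokenizer `encode_smiles`, both hypothetical placeholders:

```python
# Hedged sketch of the fine-tuning strategy: further training a generic SMILES
# language model on a small set of known actives biases sampling toward the
# target's chemical space. `model` and `encode_smiles` are placeholders.
import torch
import torch.nn.functional as F

def fine_tune(model, encode_smiles, active_smiles, epochs=10, lr=1e-4):
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    for _ in range(epochs):
        for smiles in active_smiles:
            tokens = encode_smiles(smiles)             # 1-D LongTensor of token ids
            inputs, targets = tokens[:-1], tokens[1:]  # next-character prediction
            logits = model(inputs.unsqueeze(0))        # (1, seq_len, vocab_size)
            loss = F.cross_entropy(logits.squeeze(0), targets)
            opt.zero_grad()
            loss.backward()
            opt.step()
    return model
```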
+
+Reinforcement learning approaches where operations are performed directly on the molecular graph bypass the need to learn the details of SMILES syntax, allowing the model to focus purely on chemistry. Additionally, they seem to require less training data and generate more valid molecules, since they are constrained by design to graph operations that satisfy chemical valence rules [@tag:Elton_molecular_design_review]. A reinforcement learning agent developed by Zhou et al. demonstrated superior molecular optimization performance on certain easy-to-compute metrics when compared with other deep learning-based approaches such as the Junction Tree VAE, Objective-Reinforced Generative Adversarial Network, and Graph Convolutional Policy Network [@doi:10.1038/s41598-019-47148-x]. As another example, Zhavoronkov et al. used generative tensorial reinforcement learning to discover potent inhibitors of discoidin domain receptor 1 (DDR1) [@tag:Zhavoronkov2019_drugs]. Their work is unique in that six lead candidates discovered using their approach were synthesized and tested in the lab, with four of the six achieving some degree of binding to DDR1 [@tag:Zhavoronkov2019_drugs].
+
+It is worth pointing out that classical genetic algorithms have been shown to compete with many of the most advanced deep learning methods for molecular optimization [@doi:10.1246/cl.180665; @doi:10.1039/C8SC05372C]. Such genetic algorithms use hard-coded rules based on possible chemical reactions to generate molecular structures [@doi:10.1021/acs.jmedchem.5b01849]. Still, there are many avenues for improving current deep learning systems, and the future of the field looks bright.
diff --git a/content/06.discussion.md b/content/06.discussion.md
index 364ff199..910a02c7 100644
--- a/content/06.discussion.md
+++ b/content/06.discussion.md
@@ -6,14 +6,12 @@ Here we examine these factors that may impede further progress, ask what steps h
 
 ### Customizing deep learning models reflects a tradeoff between bias and variance
 
 Some of the challenges in applying deep learning are shared with other machine learning methods.
-In particular, many problem-specific optimizations described in this review reflect a recurring universal tradeoff---controlling the flexibility of a model in order to maximize predictivity.
-Methods for adjusting the flexibility of deep learning models include dropout, reduced data projections, and transfer learning (described below).
-One way of understanding such model optimizations is that they incorporate external information to limit model flexibility and thereby improve predictions.
-This balance is formally described as a tradeoff between "bias and variance"
+In particular, many problem-specific optimizations described in this review reflect a recurring universal tradeoff---controlling the flexibility of a model in order to maximize generalizability and prevent overfitting.
+Methods for preventing overfitting in deep learning models include adding regularization terms to the loss, applying dropout, using reduced data projections, and early stopping.
+The need for balance between model expressiveness and overfitting is formally described as a tradeoff between "bias and variance"
 [@url:http://www.deeplearningbook.org/].
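As an illustration of two of the techniques just listed, the hedged sketch below adds an L2 regularization term to the loss and stops training when the validation loss stops improving; `model`, `train_loader`, and `val_loss` are hypothetical placeholders:

```python
# Hedged sketch of two overfitting controls: an L2 penalty added to the loss
# and early stopping on validation loss. All names here are placeholders.
import torch
import torch.nn.functional as F

def train(model, train_loader, val_loss, l2=1e-4, patience=5, max_epochs=100):
    opt = torch.optim.Adam(model.parameters())
    best, bad_epochs = float("inf"), 0
    for epoch in range(max_epochs):
        for x, y in train_loader:
            penalty = sum(p.pow(2).sum() for p in model.parameters())
            loss = F.cross_entropy(model(x), y) + l2 * penalty  # regularized loss
            opt.zero_grad()
            loss.backward()
            opt.step()
        current = val_loss(model)
        if current < best:
            best, bad_epochs = current, 0
        else:
            bad_epochs += 1
            if bad_epochs >= patience:  # early stopping: validation loss stalled
                break
    return model
```

Dropout, by contrast, is specified inside the model architecture itself rather than in the training loop.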
 
-Although the bias-variance tradeoff is common to all machine learning applications, recent empirical and theoretical observations suggest that deep learning models may have uniquely advantageous generalization properties [@tag:Zhang2017_generalization; @tag:Lin2017_why_dl_works].
-Nevertheless, additional advances will be needed to establish a coherent theoretical foundation that enables practitioners to better reason about their models from first principles.
+Although the bias-variance tradeoff is important to take into account in many machine learning tasks, recent empirical and theoretical observations suggest that deep neural networks have uniquely advantageous generalization properties and do not obey the tradeoff as expected [@tag:Belkin2019_PNAS; @tag:Zhang2017_generalization; @tag:Lin2017_why_dl_works]. According to bias-variance theory, many of the most successful deep neural networks have so many free parameters that they should overfit [@tag:Belkin2019_PNAS]. It has been shown, however, that deep neural networks operate in a regime where they can exactly interpolate their training data yet are still able to generalize [@tag:Belkin2019_PNAS]. Thus, poor generalizability can often be remedied by adding more layers and increasing the number of free parameters, in conflict with classic bias-variance theory. Additional advances will be needed to establish a coherent theoretical foundation that enables practitioners to better reason about their models from first principles.
 
 #### Evaluation metrics for imbalanced classification
 
@@ -106,18 +104,22 @@ As a result, several opportunities for innovation arise: understanding the cause
 Unfortunately, uncertainty quantification techniques are underutilized in the computational biology communities and largely ignored in the current deep learning for biomedicine literature.
 Thus, the practical value of uncertainty quantification in biomedical domains is yet to be appreciated.
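One uncertainty quantification technique that is straightforward to retrofit onto an existing network is Monte Carlo dropout [@tag:Gal2015_dropout]; the sketch below is illustrative only, with `model` standing in for any PyTorch network that contains dropout layers:

```python
# Hedged sketch of Monte Carlo dropout: keep dropout active at test time and
# treat the spread of repeated stochastic forward passes as a rough measure
# of predictive uncertainty. `model` is a hypothetical placeholder.
import torch

def mc_dropout_predict(model, x, n_passes=50):
    model.train()  # keeps dropout layers active, unlike model.eval()
    with torch.no_grad():
        preds = torch.stack([model(x) for _ in range(n_passes)])
    return preds.mean(dim=0), preds.std(dim=0)  # prediction and uncertainty
```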
 
-### Interpretation
+### Interpretability
 
-As deep learning models achieve state-of-the-art performance in a variety of domains, there is a growing need to make the models more interpretable.
-Interpretability matters for two main reasons.
-First, a model that achieves breakthrough performance may have identified patterns in the data that practitioners in the field would like to understand.
-However, this would not be possible if the model is a black box.
-Second, interpretability is important for trust.
-If a model is making medical diagnoses, it is important to ensure the model is making decisions for reliable reasons and is not focusing on an artifact of the data.
-A motivating example of this can be found in Caruana et al. [@tag:Caruana2015_intelligible], where a model trained to predict the likelihood of death from pneumonia assigned lower risk to patients with asthma, but only because such patients were treated as higher priority by the hospital.
-In the context of deep learning, understanding the basis of a model's output is particularly important as deep learning models are unusually susceptible to adversarial examples [@tag:Nguyen2014_adversarial] and can output confidence scores over 99.99% for samples that resemble pure noise.
+As deep learning models achieve state-of-the-art performance in a variety of domains, there is a growing need to make the models more interpretable. There are several important reasons to care about interpretability.
 
-As the concept of interpretability is quite broad, many methods described as improving the interpretability of deep learning models take disparate and often complementary approaches.
+First, a model that achieves breakthrough performance may have identified patterns in the data that practitioners in the field would like to understand.
+For instance, interpreting a model for predicting chemical properties from molecular graphs may illuminate previously unknown structure-property relations.
+It is also useful to check whether a model is using known relationships; if not, this may suggest a way to improve the model.
+Finally, there is a chance that the model may have learned relationships that are known to be wrong. This can be due to improper training data or to overfitting on spurious correlations in the training data.
+
+This is particularly important if a model is making medical diagnoses. A motivating example of this can be found in Caruana et al. [@tag:Caruana2015_intelligible], where a model trained to predict the likelihood of death from pneumonia assigned lower risk to patients with asthma, but only because such patients were treated as higher priority by the hospital.
+
+It has been shown that deep learning models are unusually susceptible to carefully crafted adversarial examples [@tag:Nguyen2014_adversarial] and can output confidence scores over 99.99% for samples that resemble pure noise. While this is largely still an unsolved problem, the interpretation of deep learning models can help us understand these failure modes and how to prevent them.
+
+Several different levels of interpretability can be distinguished. Consider a prototypical CNN used for image classification. At a high level, one can perform an occlusion or sensitivity analysis to determine what sections of an image are most important for making a classification, generating a "saliency" heatmap. Then, if one wishes to understand what is going on in the layers of the model, several tools have been developed for visualizing the learned feature maps, such as the deconvnet [@tag:Zeiler2013_visualizing]. Finally, if one wishes to analyze the flow of information through a deep neural network, layer-wise relevance propagation can be performed to see how each layer contributes to different classifications [@tag:Montavon2018_visualization].
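The occlusion analysis described above can be sketched in a few lines; `model` is a hypothetical placeholder for a trained image classifier mapping a `(1, channels, H, W)` tensor to class probabilities:

```python
# Hedged sketch of occlusion analysis: slide a gray patch over the image and
# record how much the predicted probability of the target class drops. Large
# drops mark regions the classifier relies on. `model` is a placeholder.
import torch

def occlusion_map(model, image, target_class, patch=16, stride=8):
    _, _, H, W = image.shape                    # image: (1, channels, H, W)
    with torch.no_grad():
        baseline = model(image)[0, target_class].item()
        rows = (H - patch) // stride + 1
        cols = (W - patch) // stride + 1
        heatmap = torch.zeros(rows, cols)
        for i, y in enumerate(range(0, H - patch + 1, stride)):
            for j, x in enumerate(range(0, W - patch + 1, stride)):
                occluded = image.clone()
                occluded[:, :, y:y + patch, x:x + patch] = 0.5  # gray square
                heatmap[i, j] = baseline - model(occluded)[0, target_class].item()
    return heatmap  # the "saliency" heatmap described above
```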
+
+A starting point for many discussions of interpretability is the interpretability-accuracy trade-off. The trade-off assumes that only simple models are interpretable, and a delineation is often made between "white box" models (linear regression, decision trees), which are assumed to be not very accurate, and "black box" models (neural networks, kernel SVMs), which are assumed to be more accurate. This view is becoming outmoded, however, with the development of sophisticated tools for interrogating and understanding deep neural networks [@tag:Montavon2018_visualization; @tag:Zeiler2013_visualizing]. Still, this trade-off motivates a common practice whereby an easy-to-interpret model is trained next to a hard-to-interpret one. For instance, in the example discussed by Caruana et al. mentioned earlier, a rule-based model was trained next to a neural network using the same training data to understand the types of relations that were learned by the neural network. More recently, a method for "distilling" a neural network into a decision tree has been developed [@tag:Frosst2017_distilling].
 
 #### Assigning example-specific importance scores
 
diff --git a/content/citation-tags.tsv b/content/citation-tags.tsv
index b4eb9efe..ee40470f 100644
--- a/content/citation-tags.tsv
+++ b/content/citation-tags.tsv
@@ -20,6 +20,7 @@ Bar2015_nonmed_tl doi:10.1117/12.2083124
 Barash2010_splicing_code doi:10.1038/nature09000
 Baxt1991_myocardial doi:10.7326/0003-4819-115-11-843
 BeaulieuJones2016_ehr_encode doi:10.1016/j.jbi.2016.10.007
+Belkin2019_PNAS doi:10.1073/pnas.1903070116
 Bengio2015_prec arxiv:1412.7024
 Berezikov2011_mirna doi:10.1038/nrg3079
 Bergstra2011_hyper url:https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf
@@ -66,6 +67,7 @@ Duvenaud2015_graph_conv url:http://papers.nips.cc/paper/5954-convolutional-netwo
 Edwards2015_growing_pains doi:10.1145/2771283
 Ehran2009_visualizing url:http://www.iro.umontreal.ca/~lisa/publications2/index.php/publications/show/247
 Elephas url:https://github.com/maxpumperla/elephas
+Elton_molecular_design_review doi:10.1039/C9ME00039A
 Errington2014_reproducibility doi:10.7554/eLife.04333
 Eser2016_fiddle doi:10.1101/081380
 Esfahani2016_melanoma doi:10.1109/EMBC.2016.7590963
@@ -76,6 +78,7 @@ Feinberg2018 doi:10.1056/NEJMra1402513
 Finnegan2017_maximum doi:10.1101/105957
 Fong2017_perturb doi:10.1109/ICCV.2017.371
 Fraga2005 doi:10.1073/pnas.0500398102
+Frosst2017_distilling arxiv:1711.09784
 Fu2019 doi:10.1109/TCBB.2019.2909237
 Gal2015_dropout arxiv:1506.02142
 Gaublomme2015_th17 doi:10.1016/j.cell.2015.11.009
@@ -184,6 +187,7 @@ Meissner2008 doi:10.1038/nature07107
 Metaphlan doi:10.1038/nmeth.2066
 Meng2016_mllib arxiv:1505.06807
 Min2016_deepenhancer doi:10.1109/BIBM.2016.7822593
+Montavon2018_visualization doi:10.1016/j.dsp.2017.10.011
 Momeni2018 doi:10.1101/438341
 Moritz2015_sparknet arxiv:1511.06051
 Mordvintsev2015_inceptionism url:http://googleresearch.blogspot.co.uk/2015/06/inceptionism-going-deeper-into-neural.html
@@ -310,7 +314,8 @@ Yoon2016_cancer_reports doi:10.1007/978-3-319-47898-2_21
 Yosinski2014 url:https://papers.nips.cc/paper/5347-how-transferable-are-features-in-deep-neural-networks
 Yosinksi2015_understanding arxiv:1506.06579
 Yu2016_melanoma_resnet doi:10.1109/TMI.2016.2642839
-Zeiler2013_visualizing arxiv:1311.2901
+Zeiler2013_visualizing doi:10.1007/978-3-319-10590-1_53
+Zhavoronkov2019_drugs doi:10.1038/s41587-019-0224-x
 Zeng2015 doi:10.1186/s12859-015-0553-9
 Zeng2016_convolutional doi:10.1093/bioinformatics/btw255
 Zhang2015_multitask_tl doi:10.1145/2783258.2783304