-
-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathlinear_operators_overview.tex
1061 lines (963 loc) · 60.7 KB
/
linear_operators_overview.tex
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
%!TeX shellEscape = restricted
% !TEX program = pdflatex
%!TeX enableSynctex = true
\documentclass[11pt]{article}
\usepackage{url,amsmath,amsfonts}
\usepackage[capitalise,noabbrev]{cleveref} %
\crefname{equation}{}{} %
%\usepackage{minted} %If require code snippets, turn back on
\usepackage[nohead]{geometry}
%\usepackage{tikz}
%\usetikzlibrary{decorations.pathreplacing,angles,quotes,calligraphy}
\usepackage{graphicx}
%Some macros
\newcommand{\set}[1]{\ensuremath{\left\{{#1}\right\}}}
\newcommand{\R}{\ensuremath{\mathbb{R}}}
\newcommand{\D}[1][]{\ensuremath{\partial_{#1}}}
\geometry{left=1in,right=1in,top=0.6in,bottom=1in}
\begin{document}
\title{Discretizing Linear and Affine Operators Overview}
\author{}
\date{}
\maketitle
\section{Overview of Notation}
Some general notation, independent of operators and discretization
\begin{itemize}
\item For a given variable $q$, define the notation $q^{-} \equiv \min\set{q,0}$ and $q^{+} \equiv \max\set{q,0}$, which will be useful for defining finite-differences with an upwind scheme. This can apply to vectors as well. For example, $q_m^{-} = q_m$ if $q_m < 0$ and $0$ if $q_m > 0$, and $q^{-} \equiv \set{q^{-}_m}_{m=1}^{M}$.
\item Let $W_t$ be the Wiener process with the integral defined by the Ito interpretation
\item Derivatives are denoted by the operator $\D$, with univariate derivatives such as $\D[x]\tilde{u}(x) \equiv \tilde{u}'(x)$.
\item We will denote continuous functions, prior to discretization, like $\tilde{u}(x)$. The discretization of $\tilde{u}(x)$ on $x\in \R^M$ is denoted $u \in \R^M$ and the discretization of $\tilde{u}(x)$ on $\bar{x}$ is $\bar{u} \in \R^{\bar{M}}$.
\item Some special matrices to help in the composition notation:
\begin{itemize}
\item $\mathbf{I}_N$ is the $N\times N$ identity matrix. Always drop the subscript when the dimensions are unambiguous, as it would be the same in the code
\item $\mathbf{0}_N$ is the column vector of $N$ $0$s, and $\mathbf{0}_N^{\top}$ a row vector
\item $\mathbf{0}_{N\times M}$ is the $N\times M$ matrix of $0$s
\end{itemize}
\item An \textit{affine} operator $A$ can be decomposed into a \textit{linear operator}, denoted $A_L$, and a \textit{bias} denoted $A_b$, such that for all $x$ in the domain,
$$
A x = A_L x + A_b
$$
\item In the case of an affine operator on a discrete space, $A : \R^M \to \R^N$,
\begin{itemize}
\item We can decompose this into the linear operator $A_L \in \R^{N\times M}$ and bias vector $A_b \in \R^{N}$ such that for all $x\in\R^M$, $A x = A_L x + A_b$
\end{itemize}
\end{itemize}
The purpose of these notes is to discretize affine or linear operators with finite differences.\footnote{These are often the infinitesimal generator of a stochastic process. See \url{https://en.wikipedia.org/wiki/Infinitesimal_generator_(stochastic_processes)} for some formulas and interpretation for diffusions, and \url{https://en.wikipedia.org/wiki/Transition_rate_matrix}} To set some notation and definitions on operators,
\begin{itemize}
\item Denote a typical affine operator on the space of continuous functions as $\tilde{A}$, and $\tilde{L}$ for a linear operator. When these are discretized on a particular grid, denote them as $A$ and $L$ accordingly.
\item The baseline domain of the operator is on $[x^{\min}, x^{\max}]$.
\item Form a grid on the domain with $M$ points, $\set{x_m}_{m=1}^{M}$ with $x_1 = x^{\min}$ and $x_{M} = x^{\max}$. After discretizing, we can sometimes denote the grid with the variable name, i.e. $x \equiv \set{x_m}_{m=1}^M$. In the simple case of a uniform grid, $\Delta \equiv x_{m+1} - x_m$ for all $m < M$.
\item A core part of the discretization process will be to expand the variable onto the \textit{extension} (i.e. including any boundary points required for the boundary conditions). The set of boundary points will be referred to as $S_E$. If there are $M$ points in the grid, and $M_E$ points required for the boundary conditions (i.e. $|S_E| = M_E$), then define $\bar{M} = M + M_E$ as the total set of points on the extended domain. We will denote the extended domain as $\bar{x} \in \R^{\bar{M}}$.
\item For any arbitrary continuous function $\tilde{y}(x)$ defined in the whole space of $x$, we define $\bar{y}$ as its discretization on the whole domain of $x$ and $y$ as the discretization only for interior points of the domain. So while $\bar{y}$ has length $\bar{M}$, $y$ has length $M$.
\end{itemize}
\section{General Overview of Discretization and Boundary Values}\label{sec:general}
\subsection{Simple Differential Operators}\label{sec:general-simple}
Take a simple linear or affine differential operator $\tilde{A}$, (possibly affine) boundary conditions $\tilde{B}$ and the function of interest $\tilde{u}(x)$. The general problem to solve is to find the $\tilde{u}(x)$ such that
% \footnote{\textbf{TODO:} Is this correct? Can the $\tilde{L}$ really be a more general $\tilde{A}$? Also, is the $\tilde{B}\tilde{u}(x) = \tilde{b}(x)$ really a good way to write down the possibly affine boundary conditions?}
\begin{align}
\tilde{A} \tilde{u}(x) &= 0\label{eq:A-u-DE}\\
\tilde{B} \tilde{u}(x) &= 0\label{eq:B-u-DE}
\end{align}
For linear $\tilde{A}$, we will denote it as $\tilde{L}$ to emphasize the fact that it's not affine. The discretization process generates the following objects:
\begin{itemize}
\item $B\in \R^{M_E \times \bar{M}}$ is the (possibly affine) \textit{boundary condition operator}, which satisfies the equation
\begin{align}
B \bar{u} = \mathbf{0}_{M_E}\label{eq:B_operator_block}
\end{align}
for any $\bar{u}$ in the space of functions that satisfy the discretized boundary conditions. \footnote{Notice that $B$ is not necessarily unique. The choice of $B$ is exactly the choice of boundary value discretization. For instance, choosing to do first or second order Neumann border conditions is simply the choice of the operator $B$.} For affine $B$, we can write out \cref{eq:B_operator_block} as
\begin{align}
B_L\bar{u} = -B_b\label{eq:B_operator_block_expanded}
\end{align}
\item $R\in \R^{M\times \bar{M}}$ is the linear \textit{restriction operator} which is defined by the domain. It removes columns which are not in the interior. It fulfills
\begin{align}
R \bar{u} = u \label{eq:R_operator}
\end{align}
\item $Q^B : \R^M \to \R^{\bar{M}}$ is the (potentially affine) \textit{boundary extrapolation operator} associated with $B$. The operator $Q^B$ is defined as fulfilling the following relationships (keeping in mind that $Q^B$ is affine and $R$ is linear)
\begin{align}
Q^B R\bar{u} &= \bar{u}\label{eq:Q_operator_1}\\
B Q^B u &= \mathbf{0}_{M_E} \label{eq:Q_operator_2}
\end{align}
To give intuition, for any $\bar{u}$ that satisfies the border conditions:\footnote{Notice that $Q = R^{-1}$ if R is square, and this is only true as maps on functions which satisfy the boundary. Furthermore, in order for \cref{eq:Q_operator_1} to hold on trivial $u$, we need that the interior of $Q$ is identity, so it is defined by its first and last rows.} \cref{eq:Q_operator_1} says that finding the restriction of the function and then extrapolating to extension yields the same function, and \cref{eq:Q_operator_2} says that the boundary extrapolation of the interior of the function, $u$, fulfills the boundary value.
\item $A : \R^{\bar{M}} \to \R^M$ is the (possibly affine) \textit{stencil operator}. It maps the extended domain to the interior by applying a stencil, which is determined by the derivative operator and the numerical differentiation scheme. As with the continuous case, we will denote the stencil operator as $L$ if it is linear.
\item The \textit{discretized derivative operator} is $A^B : \R^M \to \R^M$. We use the $B$ superscript to emphasize the operator's dependence on the boundary condition.
The operator is composed as $A^B = AQ^B$. The intuition is that first $Q^B$ is applied to the interior points to add the ``ghost nodes'' corresponding to the boundary condition, and then the stencil operator $A$ is applied to the whole domain, including the ghost nodes. $A^B$ is in general affine if $A$ and/or $Q^B$ are affine, and linear if both of them are linear, in which case we will denote it as $L^B$ to emphasize the linearity.
\end{itemize}
\subsection{Composite Differential Operators}\label{sec:general-composite}
The discretization of a composite differential operator follows the same framework as \cref{sec:general-simple}. However, instead of deriving the stencil operator $A$ directly, it is customary to think of it as being composed of several component operators. For this document we will only consider the simplest of compositions: linear combinations. For more complex examples, especially high-dimensional ones, we may encounter more advanced forms of composition such as tensor products/sums.
Let $\tilde{A}$ be the linear combination of several differential operators: $\tilde{A} = \tilde{A}_1 + \tilde{A}_2 + \cdots + \tilde{A}_n$. To get the discretized entities of \cref{sec:general-simple}, we will proceed as follows:
\begin{itemize}
\item First, discretize each $\tilde{A}_k$ separately and get its $B_k$, $R_k$, $Q^{B_k}$ and $A_k$ along with the extended domain $\bar{u}_k$.\footnote{Even if the physical boundary condition is the same for all components, the required boundary points may still be different because of the order of differentiation and/or numerical approximation. For example, a second-order approximation to $\D[xx]$ requires one boundary point at each end, whereas a fourth-order approximation requires two.}
\item Let $\bar{u}$ be the union of all the $\bar{u}_k$, i.e. the largest common extended domain. Usually this is just the $\bar{u}_k$ corresponding to the ``biggest'' component operator. We need to work out the $B$, $R$ and $Q^B$ for $\bar{u}$, which is trivial if it coincides with one of the $\bar{u}_k$.
\item The composed stencil operator defined on $\bar{u}$ is $A = A_1E_1 + A_2E_2 + \cdots + A_nE_n$, with $E_k \in \R^{M_{E,k} \times M_E}$ the \textit{extension operator} for $A_k$. In practice we don't need $E_k$ explicitly as $A_kE_k$ can be constructed easily by padding $A_k$ with zeros at columns corresponding to boundary points that are not used. For example, if $L_k \in \R^{2 \times 3}$ but the common extended domain includes an extra boundary point at the end, then
\begin{align*}
L_kE_k &= \begin{bmatrix}
L_k & \mathbf{0}_2
\end{bmatrix} \in \R^{2 \times 4}
\end{align*}
which extends $L_k$ to the common extended domain.\footnote{For affine $A_k$, the rule of affine algebra applies: $(A_kE_k)_L = A_{k,L}E_k$, $(A_kE_k)_b = A_{k,b}$. In other words, the bias term remains unchanged and the linear part gets padded with zeros.} The extended stencil operators can now be linearly combined as they are of the same shape.
Alternatively, $E_k$ can be viewed as the \textit{restriction operator} mapping $\bar{u}$ to $\bar{u}_k$ by stripping away unused boundary points. In other words, instead of viewing $A_kE_k\bar{u}$ as $(A_kE_k)\bar{u}$ we can view it as $A_k(E_k\bar{u}) = A_k\bar{u}_k$.\footnote{This can make a big difference in application depending on the details of implementation: whether $A_k$ is sparse or not, how the grid is implemented, etc.}
\item The discretized derivative operator is still $A^B = AQ^B$.
\end{itemize}
\section{Time-Invariant Stochastic Process Examples}\label{sec:examples}
\subsection{Definitions and Notation for Examples}
Let $x_t$ be a stochastic process for a univariate function defined on a continuous domain $x \in (x^{\min}, x^{\max})$ where $-\infty < x^{\min} < x^{\max} < \infty$. We will assume throughout that the domain is time-invariant. The infinitesimal generator associated with $x_t$ is denoted as $\tilde{L}^s$.
Let the payoff in state $x$ be $\tilde{p}(x)$ (for a simple example, we can choose $\tilde{p}(x) = x$). If payoffs are discounted at rate $r > 0$, then the simple HJBE for $\tilde{u}(x)$ is
\begin{align}
r \tilde{u}(x) &= \tilde{p}(x) + \tilde{L}^s \tilde{u}(x)\label{eq:general-stationary-HJBE}
\end{align}
We can define another operator $\tilde{L} \equiv r\tilde{I} - \tilde{L}^s$ where $\tilde{I}$ is the identity operator. With this we can rearrange \cref{eq:general-stationary-HJBE} as
\begin{align}
\tilde{L} \tilde{u}(x) &= \tilde{p}(x)
\end{align}
Common boundary conditions for $\tilde{L}$ are:
\begin{itemize}
\item $\D[x]\tilde{u}(x^{\min}) = b^{\min}$, $\D[x]\tilde{u}(x^{\max}) = b^{\max}$ for \textit{Neumann boundaries}.
\item $\tilde{u}(x^{\min}) = b^{\min}$, $\tilde{u}(x^{\max}) = b^{\max}$ for \textit{Dirichlet boundaries}.
\item Mixed boundaries, e.g. $\tilde{u}(x^{\min}) = b^{\min}$, $\D[x]\tilde{u}(x^{\max}) = b^{\max}$ for Dirichlet boundary at the bottom and Neumann boundary at the top.
\end{itemize}
It is customary to refer to homogeneous (i.e. $b^{\min} = b^{\max} = 0$) Neumann boundaries as \textit{reflecting barriers/boundaries}, and homogeneous Dirichlet boundaries as \textit{absorbing barriers/boundaries}.
The stencil operators used by the examples are:
\begin{itemize}
\item Second-order approximation to $\D[xx]$ (central differencing):
\begin{align}
L_2 &\equiv \frac{1}{\Delta^2}\begin{bmatrix}
1&-2&1&\dots&0&0&0\\
0&1&-2&\dots&0&0&0\\
\vdots&\vdots&\vdots&\ddots&\vdots&\vdots&\vdots\\
0&0&0&\dots&-2&1&0\\
0&0&0&\cdots&1&-2&1
\end{bmatrix}_{M\times(M+2)}\label{eq:L-2}
\end{align}
$L_2$ requires one boundary point at each end of the domain.
\item First-order approximation to $\D[x]$:
\begin{align}
L_1 &\equiv \frac{1}{\Delta}\begin{bmatrix}
-1&1&0&\dots&0&0&0\\
0&-1&1&\dots&0&0&0\\
\vdots&\vdots&\vdots&\ddots&\vdots&\vdots&\vdots\\
0&0&0&\dots&-1&1&0\\
0&0&0&\cdots&0&-1&1
\end{bmatrix}_{M\times(M+1)}\label{eq:L-1}
\end{align}
$L_1$ requires one boundary point at the top or bottom end of the domain. For \textit{forward differencing} the extra point is at the top, while for \textit{backward differencing} it is at the bottom. To avoid ambiguity we shall refer to the forward and backward stencils as $L^+_1$ and $L^-_1$ respectively.
\end{itemize}
In most examples the largest common extended domain is the same one used by $L_2$, which has $M_E = 2$ with $S_E = \{0, M+1\}$. The restriction operator is
\begin{align}
R_2 &\equiv \begin{bmatrix} \mathbf{0}_M & \mathbf{I}_M & \mathbf{0}_M\end{bmatrix}\label{eq:R-2}
\end{align}
The boundary operators for this domain with the most common boundary conditions are:
\begin{itemize}
\item Neumann boundary at both sides:
\begin{align}
B_{RR,L} &\equiv \begin{bmatrix}
1&-1&0&\dots&0&0&0\\
0&0&0&\cdots&0&-1&1
\end{bmatrix}_{2\times (M+2)}\quad
B_{RR,b} = \begin{bmatrix}b^{\min}\Delta\\-b^{\max}\Delta\end{bmatrix}\label{eq:B-RR}
\end{align}
\item Dirichlet boundary at both sides:
\begin{align}
B_{AA,L} &\equiv \begin{bmatrix}
1&0&0&\dots&0&0&0\\
0&0&0&\cdots&0&0&1
\end{bmatrix}_{2\times (M+2)}\quad
B_{AA,b} = \begin{bmatrix}-b^{\min}\\-b^{\max}\end{bmatrix}\label{eq:B-AA}
\end{align}
\item Dirichlet boundary at the bottom and Neumann boundary at the top:
\begin{align}
B_{AR,L} &\equiv \begin{bmatrix}
1&0&0&\dots&0&0&0\\
0&0&0&\cdots&0&-1&1
\end{bmatrix}_{2\times (M+2)}\quad
B_{AR,b} = \begin{bmatrix}-b^{\min}\\-b^{\max}\Delta\end{bmatrix}\label{eq:B-AR}
\end{align}
\end{itemize}
For homogeneous boundaries (i.e. reflecting/absorbing) the $B$ operators are linear. In this case we will slightly abuse the notation and simply drop the $L$ subscript when referring to the linear operator.
The corresponding boundary extrapolation operators are:
\begin{align}
Q^{B_{RR}}_L &\equiv \begin{bmatrix}1&0&\cdots&0&0\\&&\mathbf{I}_M&&\\0&0&\cdots&0&1\end{bmatrix}\quad
Q^{B_{RR}}_b = \begin{bmatrix}-b^{\min}\Delta\\\mathbf{0}_M\\b^{\max}\Delta\end{bmatrix}\label{eq:Q-RR}\\
Q^{B_{AA}}_L &\equiv \begin{bmatrix}0&0&\cdots&0&0\\&&\mathbf{I}_M&&\\0&0&\cdots&0&0\end{bmatrix}\quad
Q^{B_{AA}}_b = \begin{bmatrix}b^{\min}\\\mathbf{0}_M\\b^{\max}\end{bmatrix}\label{eq:Q-AA}\\
Q^{B_{AR}}_L &\equiv \begin{bmatrix}0&0&\cdots&0&0\\&&\mathbf{I}_M&&\\0&0&\cdots&0&1\end{bmatrix}\quad
Q^{B_{AR}}_b = \begin{bmatrix}b^{\min}\\\mathbf{0}_M\\b^{\max}\Delta\end{bmatrix}\label{eq:Q-AR}
\end{align}
Again when the boundaries are homogeneous, we will drop the $L$ subscript.
\subsection{Stationary HJBE with Reflecting Barriers}\label{sec:simple-reflecting-example}
Take the stochastic process
$$
d x_t = d W_t
$$
with reflecting barriers at $x^{\min}$ and $x^{\max}$. The partial differential operator (infinitesimal generator) associated with the stochastic process is
\begin{equation}
\tilde{L}^s \equiv \frac{1}{2}\D[xx]\label{eq:L-s-nodrift}
\end{equation}
For this process, we derive below all of the matrices of \cref{sec:general} and the system of equations to solve for $\tilde{u}(x)$ in \cref{eq:general-stationary-HJBE}. We still have \textbf{to do}:
\begin{itemize}
\item Check that the code \url{operator_examples/simple_stationary_HJBE_reflecting.jl} is correct
\end{itemize}
Consider
\begin{align}
r \tilde{u}(x) &= \tilde{L}^s \tilde{u}(x) + x\label{HJBE_reflecting_barriers_PDE}\\
\intertext{Define the operator $\tilde{L}$ and rearrange,}
\tilde{L} \tilde{u}(x) &= x\\
\tilde{L}&\equiv r - \frac{1}{2}\partial_{xx}\label{HJBE_operator_first_example}
\end{align}
We first consider a one-dimensional case where $x\in [x^{\min},x^{\max}]$. Let $M_E = 2$, $S_E = \{0, M+1\}$, thereby $\Delta = \frac{x^{\max}-x^{\min}}{\bar{M}}$, and $\bar{M} = M+2$. From \cref{HJBE_operator_first_example} and given \cref{eq:L-2} from the previous section, the matrix form of the operator $\tilde{L}^s$ can be defined, given as the stencil operator $A = \frac{L_2}{2}$.
By reflecting barriers, we can define $B$ just as $B_{RR}$ in \cref{eq:B-RR} and then we have
\begin{equation}
B\bar{u} = \begin{bmatrix}
0\\
0
\end{bmatrix} \label{B_reflecting}
\end{equation}
Therefore, $\bar{u}(x_0) = \bar{u}(x_1)$ and $\bar{u}(x_{M+1}) = \bar{u}(x_M)$. It is important to notice that our choice for $B$ defines the linear relationship between the interior points and the boundary conditions.
Moreover, $R$ is again defined as in \cref{eq:R-2} and $Q$ is defined by $Q R\bar{u}\equiv Q_L R\bar{u}+Q_b = \bar{u}$, where
\begin{equation}
Q_L = \begin{bmatrix}
1& 0&\dots&0&0\\
& & \mathbf{I}_M & & \\
0&0&\dots&0&1
\end{bmatrix}%_{\bar{M}\times M}
\quad , \text{ } Q_b = \begin{matrix}
\mathbf{0}_{\bar{M}}
\end{matrix}\label{Q_reflecting}
\end{equation}
Then, it is easy to verify that \cref{eq:Q_operator_1} and \cref{eq:Q_operator_2} hold in this case. Additionally, it is worth noting that, since $Q_b = \mathbf{0}_{\bar{M}}$, $Q$ is a linear operator.
To solve $\bar{u}(x)$, we first solve interiors according to \cref{HJBE_reflecting_barriers_PDE} and the definition of operator $Q$, which provides us with two conditions:
\begin{align}
L \bar{u} &= x\label{solve_u_hat_cond_1}\\
Q R\bar{u} &= Q u = Q_L u+Q_b = \bar{u}\label{solve_u_hat_cond_2}
\end{align}
where the discretization of the linear operator $\tilde{L}$, defined on the extension, is
\begin{equation}
L \equiv \left([\mathbf{0}_{M} \text{ } \mathbf{I}_{M} \text{ } \mathbf{0}_{M}] r - \tfrac{1}{2}L_2\right)
\label{L_definition}
\end{equation}
Note that in doing the composition of the operator in \cref{L_definition}, we need to combine the $L_2$ (defined on $M+2$ for the extension) with $r I$, defined on $M$ points. In order to compose these, we need to extend the $r I$ operator with $0$s.
% \begin{align}
% P &= \begin{bmatrix}
% 0&r&0&\cdots&0&0&0\\
% 0&0&r&\cdots&0&0&0\\
% \vdots&\vdots&\vdots&\ddots&\vdots&\vdots&\vdots\\
% 0&0&0&\cdots&r&0&0\\
% 0&0&0&\cdots&0&r&0\\
% \end{bmatrix}_{M\times\bar{M}}-A \\
% &= \begin{bmatrix}
% -1&2(1+r\Delta^2)&-1&\dots&0&0&0\\
% 0&-1&2(1+r\Delta^2)&\dots&0&0&0\\
% \vdots&\vdots&\vdots&\ddots&\vdots&\vdots&\vdots\\
% 0&0&0&\dots&2(1+r\Delta^2)&-1&0\\
% 0&0&0&\cdots&-1&2(1+r\Delta^2)&-1
% \end{bmatrix}\frac{1}{2\Delta^{2}}
% \end{align}
Substitute \cref{solve_u_hat_cond_2} into \cref{solve_u_hat_cond_1}, we get
\begin{align}
L Q u = L (Q_L u+Q_b) = x
\end{align}
Since $Q_b = 0$ in this case, interiors are solved as
\begin{align}
u = (L Q_L)^{-1}x\label{solve_u_hat_in_terms_of_interiors}
\end{align}
%Then, by \cref{solve_u_hat_cond_2}, we can solve the extended state vector by
% \begin{align}
% \bar{u} = Q_L R\bar{u}+Q_b\label{solve_u_hat_in_terms_of_interiors}
% \end{align}
\subsection{Stationary HJBE with a Lower Absorbing Barrier}
Take the stochastic process
$$
d x_t = d W_t
$$
with an absorbing barrier at $x^{\min}$, and a reflecting barrier at $x^{\max}$. Again, the partial differential operator (infinitesimal generator) associated with the stochastic process is \cref{eq:L-s-nodrift}
For the absorbing barrier, when solving for the HJBE assume that $u(x^{\min}) = b^{\min}$ and $u'(x^{\max}) = 0$.
For this process, we derive below all of the matrices of \cref{sec:general} and the system of equations to solve for $\tilde{u}(x)$ in \cref{eq:general-stationary-HJBE}.
% We still have \textbf{to do}:
% \begin{itemize}
% \item Check that the code \url{operator_examples\simple_stationary_HJBE_reflecting.jl} is correct
% \end{itemize}
Again, we first consider a one-dimensional case where $x\in [x^{\min},x^{\max}]$. Let $M_E = 2$, $S_E = \{0, M+1\}$, thereby $\Delta = \frac{x^{\max}-x^{\min}}{\bar{M}}$, and $\bar{M} = M+2$. From \cref{HJBE_operator_first_example} and given \cref{eq:L-2} from \cref{sec:examples}, the matrix form of operator $\tilde{L}$ is again defined by \cref{L_definition}.
Given the lower absorbing barrier, we can define $B$ just as $B_{AR}$ in \cref{eq:B-AR} and then we have
\begin{equation}
B\bar{u} = \begin{bmatrix}
b^{\min}\\
0
\end{bmatrix} \equiv b
\end{equation}
Therefore, $\bar{u}(x_0) = b^{\min}$ and $\bar{u}(x_{M+1}) = \bar{u}(x_M)$.
Moreover, $R$ is again defined as in \cref{eq:R-2} and $Q$ is defined by $Q R\bar{u}\equiv Q_L R\bar{u}+Q_b = \bar{u}$, where
\begin{equation}
Q_L = \begin{bmatrix}
& & \mathbf{0}_{1\times M} & & \\
& & \mathbf{I}_M & & \\
0&0&\dots&0&1
\end{bmatrix}%_{\bar{M}\times M}
\quad, \text{ } Q_b = \begin{bmatrix}
b^{\min}\\
0\\
\vdots\\
0
\end{bmatrix}%_{\bar{M}\times 1}
\end{equation}
Then, it is easy to verify that \cref{eq:Q_operator_1} and \cref{eq:Q_operator_2} hold in this case. Additionally, it is worth noting that, if the absorbing boundary were of Dirichlet$_0$ type, then $b^{\min} = 0$ and $Q_b = \mathbf{0}_{\bar{M}}$ and $Q$ would be a linear operator.
In this case, $L$ is given by \cref{L_definition}. According to conditions \cref{solve_u_hat_cond_1} and \cref{solve_u_hat_cond_2}, again we get
\begin{align}
L(Q_L R \bar{u}+Q_b) = x
\end{align}
With $Q_b\neq 0$, we can solve interiors as
\begin{align}
R\bar{u} = (L Q_L)^{-1}(x-L Q_b)
\end{align}
Then, the extended state vector $\bar{u}$ can again be recovered from \cref{solve_u_hat_cond_2}.
\subsection{Stationary HJBE with Only Drift}
Now, do the same after adding in constant drift (and manually choose the correct upwind direction!)
$$
d x_t = \mu dt
$$
With a generator
\begin{equation}
\tilde{L}^s \equiv \mu \D[x]\label{eq:L-s-drift}
\end{equation}
For this process, we derive below all of the matrices of \cref{sec:general}, paying special attention to the upwind direction, and the system of equations to solve for $\tilde{u}(x)$ in \cref{eq:general-stationary-HJBE}. We still have \textbf{to do}:
\begin{itemize}
\item Write julia code to solve for $\tilde{u}(x)$ with the grid
\item Check these for $\mu < 0$ and $\mu > 0$.
\end{itemize}
Since the choice of the first difference depends on the sign of drift $\mu$, we define $\mu^- =\min\{\mu, 0\}$ and $\mu^+ =\max\{\mu, 0\}$.
Consider
\begin{align}
\tilde{L} \tilde{u}(x) &= x\label{HJBE_PDE_with_drifts}\\
\text{where }\tilde{L}&\equiv r - \mu\partial_{x}
\end{align}
We first consider a one-dimensional case where $x\in [x^{\min},x^{\max}]$. Let $M_E = 2$ and thereby $\Delta = \frac{x^{\max}-x^{\min}}{\bar{M}}$ and $\bar{M} = M+2$.
Considering $\mu>0$, we must choose the forward first difference, thus the matrix form of the operator $\tilde{L}^s$ can be defined as $L^s = \mu L_1^+$. Analogously, for $\mu<0$, we must choose the backward first difference, which implies that the matrix form of the operator $\tilde{L}^s$ can be defined as $L^s = \mu L_1^-$.% as in \cref{eq:L-1m}.
Considering the absorbing barriers, we can define $B$ just as $B_{AA}$ in \cref{eq:B-AA} and then we have
\begin{equation}
B\bar{u} = \begin{bmatrix}
b^{\min}\\
b^{\max}
\end{bmatrix}
\end{equation}
Therefore, $\bar{u}(x_0) = x^{\min}$ and $\bar{u}(x_{M+1}) = x^{\max}$.
Moreover, $R$ is again defined as in \cref{eq:R-2} and $Q$ is defined by $Q R\bar{u}\equiv Q_L R\bar{u}+Q_b = \bar{u}$, where
\begin{equation}
Q_L = \begin{bmatrix}
\mathbf{0}_{1\times M} \\
\mathbf{I}_M \\
\mathbf{0}_{1\times M}
\end{bmatrix}%_{\bar{M}\times M}
\quad, \text{ } Q_b = \begin{bmatrix}
b^{\min}\\
0\\
\vdots\\
b^{\max}
\end{bmatrix}%_{\bar{M}\times 1}
\end{equation}
Then, it is easy to verify that \cref{eq:Q_operator_1} and \cref{eq:Q_operator_2} hold in this case.
% In this example, the matrix $P$ in \cref{solve_u_hat_cond_1} becomes
% \begin{align}
% P = \frac{1}{\Delta}
% \begin{bmatrix}
% \mu^-&r\Delta-(\mu^--\mu^+)&-\mu^+&\dots&0&0&0\\
% 0&\mu^-&r\Delta-(\mu^--\mu^+)&\dots&0&0&0\\
% \vdots&\vdots&\vdots&\ddots&\vdots&\vdots&\vdots\\
% 0&0&0&\dots&r\Delta-(\mu^--\mu^+)&-\mu^+&0\\
% 0&0&0&s&\mu^-&r\Delta-(\mu^--\mu^+)&-\mu^+
% \end{bmatrix}%_{\bar{M}\times\bar{M}}
Similarly, with $L$ defined analogously to \cref{L_definition} (with $L^s$ in place of the second-derivative term), the interiors are solved by \cref{solve_u_hat_cond_1} and \cref{solve_u_hat_cond_2}:
\begin{align}
u = (L Q_L)^{-1}(x-L Q_b)
\end{align}
Thus, the extended state vector $\bar{u}$ can again be recovered from \cref{solve_u_hat_cond_2}.
\subsection{Stationary HJBE with Reflecting Barriers and Drift}
Now, do the same after adding in constant drift (and manually choose the correct upwind direction!)
$$
d x_t = \mu dt + \sigma d W_t
$$
With a generator
$$
\tilde{L}^s \equiv \mu \D[x] + \frac{\sigma^2}{2}\D[xx]
$$
For this process, we derive below all of the matrices of \cref{sec:general}, paying special attention to the upwind direction, and the system of equations to solve for $\tilde{u}(x)$ in \cref{eq:general-stationary-HJBE}. We still have \textbf{to do}:
\begin{itemize}
\item Write julia code to solve for $\tilde{u}(x)$ with the grid
\item Check these for $\mu < 0$ and $\mu > 0$.
\end{itemize}
We first consider a one-dimensional case where $x\in [x^{\min},x^{\max}]$. Let $M_E = 2$ and thereby $\Delta = \frac{x^{\max}-x^{\min}}{\bar{M}}$ and $\bar{M} = M+2$.
By combining operators from previous sections, in this case $L^s$ is defined as
\begin{align}
L^s &= \mu L_1^{-} +\frac{\sigma^2}{2}L_2\quad\text{if }\mu<0\\
L^s &=\mu L_1^{+} +\frac{\sigma^2}{2}L_2\quad\text{if }\mu>0\\
\intertext{And the composed operator,}
L &\equiv \left([\mathbf{0}_{M} \text{ } \mathbf{I}_{M} \text{ } \mathbf{0}_{M}] r - L^s\right)
\end{align}
Since the barriers are reflecting, we can have the same boundary conditions as in the case with reflecting barriers but no drift. Hence, operators $R$, $B$ and $Q$ are defined by \cref{eq:R-2}, \cref{B_reflecting} and \cref{Q_reflecting}, respectively. Also, with some simple algebra, we can easily verify that \cref{eq:Q_operator_1} and \cref{eq:Q_operator_2} hold in this case.
% In this example, the matrix $P$ in \cref{solve_u_hat_cond_1} becomes
% \begin{align}
% P = \frac{1}{\Delta^{2}}
% \begin{bmatrix}
% -X&r\Delta^2-Y&-Z&\dots&0&0&0\\
% 0&-X&r\Delta^2-Y&\dots&0&0&0\\
% \vdots&\vdots&\vdots&\ddots&\vdots&\vdots&\vdots\\
% 0&0&0&\dots&r\Delta^2-Y&-Z&0\\
% 0&0&0&\cdots&-X&r\Delta^2-Y&-Z
% \end{bmatrix}
% \end{align}
Given $L$ defined analogously to \cref{L_definition}, the remaining steps for solving the interiors, $u$, and the extended state vector, $\bar{u}$, are similar to those of the previous examples.
\subsection{Stationary Bellman Equation with Reflecting Barriers State Varying Drift/Variance}
Now, do the same with state-varying drift and variance (and manually choose the correct upwind direction!)
\[
d x_t = \tilde{\mu}(x_t) dt + \tilde{\sigma}(x_t) d W_t
\]
With a generator
\[
\tilde{L}^s \equiv \tilde{\mu}(x) \D[x] + \frac{\tilde{\sigma}(x)^2}{2}\D[xx]
\]
For this process, we derive below all of the matrices of \cref{sec:general}, paying special attention to the upwind direction, and the system of equations to solve for $\tilde{u}(x)$ in \cref{eq:general-stationary-HJBE}. We still have \textbf{to do}:
\begin{itemize}
\item Write julia code to solve for $\tilde{u}(x)$ with the grid.
\begin{itemize}
\item Choose $\tilde{\mu}(x)$ and $\tilde{\sigma}(x)$ functions; consider using geometric Brownian motion as a test. That is:
\begin{align}
\tilde{L}^s &\equiv \bar{\mu} x \D[x] + \frac{\bar{\sigma}^2}{2}x^2\D[xx]
\end{align}
\end{itemize}
\end{itemize}
We first consider a one-dimensional case where $x\in [x^{\min},x^{\max}]$. Let $M_E = 2$ and thereby $\Delta = \frac{x^{\max}-x^{\min}}{\bar{M}}$ and $\bar{M} = M+2$.
Again, consider $\bar{\mu}^- = \min\{\bar{\mu}, 0\}$ and $\bar{\mu}^+ = \max\{\bar{\mu}, 0\}$.
This case is similar to the previous one but with variable drift and variance. By combining operators from previous sections, in this case $L^s$ and $L$ are defined as
\begin{align}
L^s = \mu(x)^- L_1^-+\mu(x)^+L_1^+ +\sigma(x) L_2
\end{align}
where
\begin{align*}
\mu(x)^- &=\begin{bmatrix}
\bar{\mu}^-x_1&0&\cdots&0\\
0&\bar{\mu}^-x_2&\cdots&0\\
\vdots&\vdots&\ddots&\vdots\\
0&0&\cdots&\bar{\mu}^-x_M
\end{bmatrix}\\
\mu(x)^+ &=\begin{bmatrix}
\bar{\mu}^+x_1&0&\cdots&0\\
0&\bar{\mu}^+x_2&\cdots&0\\
\vdots&\vdots&\ddots&\vdots\\
0&0&\cdots&\bar{\mu}^+x_M
\end{bmatrix}\\
\sigma(x)&=\begin{bmatrix}
\frac{(\bar{\sigma}x_1)^2}{2}&0&\cdots&0\\
0&\frac{(\bar{\sigma}x_2)^2}{2}&\cdots&0\\
\vdots&\vdots&\ddots&\vdots\\
0&0&\cdots&\frac{(\bar{\sigma}x_M)^2}{2}
\end{bmatrix}
\intertext{And the composed operator,}
L &\equiv r [\mathbf{0}_{M} \; \mathbf{I}_{M} \; \mathbf{0}_{M}] - L^s
\end{align*}
Again, since barriers are reflecting, we can have the same boundary conditions as what we had in the case with reflecting barriers but no drift. Hence, operators $R$, $B$ and $Q$ are defined by \cref{eq:R-2}, \cref{B_reflecting} and \cref{Q_reflecting}, respectively. Also, with some simple algebra, we can easily verify that \cref{affine_relation_1} and \cref{affine_relation_2} hold in this case.
% In this example, the matrix $P$ in \cref{solve_u_hat_cond_1} becomes
% \begin{align}
% P = \frac{1}{\Delta^{2}} \begin{bmatrix}
% -X_1&r\Delta^2-Y_1&-Z_1&\dots&0&0&0\\
% 0&-X_2&r\Delta^2-Y_2&\dots&0&0&0\\
% \vdots&\vdots&\vdots&\ddots&\vdots&\vdots&\vdots\\
% 0&0&0&\dots&r\Delta^2-Y_{M-1}&-Z_{M-1}&0\\
% 0&0&0&\cdots&-X_M&r\Delta^2-Y_M&-Z_M
% \end{bmatrix}
% \end{align}
Again, given $L$ defined by \cref{L_definition}, the remaining steps for solving the interior, $u$, and the extended state vector, $\bar{u}$, are similar to what we did for previous examples.
\appendix
\section{Extended Equation and Gaussian Elimination}
\subsection{Overview}
The document focuses on solving the discretized equation on the interior $u$, and the information for the boundary condition is encoded in the boundary extrapolation operator $Q^B$. Alternatively, we can consider the discretized equation on the extended domain $\bar{u}$. We wish to demonstrate in this section that the two equations are indeed equivalent.
The starting point is again the continuous equations \cref{eq:A-u-DE} and \cref{eq:B-u-DE}. We discretize the domain and get the stencil operator $L$ and boundary operator $B$. For simplicity, we shall assume $L$ to be linear in this section, but the boundary conditions need not be homogeneous. The discretized equations on the extended domain $\bar{u}$ are then:
\begin{itemize}
\item From \cref{eq:A-u-DE}: $L\bar{u} = p$.
\item From \cref{eq:B-u-DE}: $B_L\bar{u} = -B_b$ (this is just \cref{eq:B_operator_block_expanded}).
\end{itemize}
The two linear equations have the same number of unknowns, so they can be stacked up:
\begin{align}
\begin{bmatrix} L \\ B_L \end{bmatrix} \bar{u} &=
\begin{bmatrix} p \\ -B_b \end{bmatrix}\label{eq:extended-stack}
\end{align}
In the examples we shall see that the extended equation \cref{eq:extended-stack} can be transformed to an equation on the interior by way of Gaussian elimination. We wish to show that the $Q^B$ matrix can be naturally generated using the elementary matrices associated with the elimination process, however the algebra is not in place yet. Nevertheless, for a known $Q^B$ the equivalence between the equations can be proved easily.\footnote{Note that the equation \cref{eq:extended-stack} does not necessarily have a unique solution. Nevertheless the reduced equation from Gaussian elimination should be the same we get from $LQ^Bu = p$.}
\subsection{Example 1: Diffusion with Reflecting Boundaries}\label{sec:appendixA-example1}
\subsubsection{Continuous Equation}
We consider a slightly modified example from \cref{sec:simple-reflecting-example}. The stochastic process in question is $dx_t = \sqrt{2}dW_t$ and a reflecting boundary is present at $x^{\min} = 0$ and $x^{\max} = 2$. The infinitesimal generator is then simply $\tilde{L}^s = \D[xx]$.
For discount rate $r > 0$ and payoff $\tilde{p}(x)$, the stationary HJBE is then
\begin{align}
\tilde{L}u(x) &= \tilde{p}(x)\\
\tilde{L} &\equiv r\tilde{I} - \D[xx] \\
\D[x]\tilde{u}(0) &= \D[x]\tilde{u}(2) = 0
\end{align}
Here $\tilde{I}$ is the (continuous) identity operator.
\subsubsection{Discretized Equation}
We'll be using a uniform grid with $\Delta x = 1$, in other words $M = 3$ interior nodes. A second-order approximation to $\D[xx]$ is used, which requires $M_E = 2$ additional nodes, and a total of $\bar{M} = 5$ nodes on the whole domain. The discretized grid entities are:
\begin{align}
p &= \begin{bmatrix} \tilde{p}(0) & \tilde{p}(1) & \tilde{p}(2)\end{bmatrix}^{\top}\\
u &= \begin{bmatrix} \tilde{u}(0) & \tilde{u}(1) & \tilde{u}(2)\end{bmatrix}^{\top}\\
\bar{u} &= \begin{bmatrix} \tilde{u}(-1) & \tilde{u}(0) & \tilde{u}(1) & \tilde{u}(2) & \tilde{u}(3)\end{bmatrix}^{\top}
\end{align}
For the discretization of $\tilde{L} = r\tilde{I} - \D[xx]$, since it is composed of two parts we shall follow the procedure in \ref{sec:general-composite}:
\begin{itemize}
\item The scaling operator $r\tilde{I}$ is discretized simply as $rI_M$, defined on the interior. There are no boundary points associated with it.
\item The discrete stencil operator for $\D[xx]$ is defined in \cref{eq:L-2}, specifically for this case it is
\begin{align}
L_2 &= \begin{bmatrix}
1 & -2 & 1 & 0 & 0\\
0 & 1 & -2 & 1 & 0\\
0 & 0 & 1 & -2 & 1\\
\end{bmatrix}\in \R^{M \times \bar{M}}
\end{align}
$L_2$ needs an extra boundary point at each end. Its associated boundary operator is given in \cref{eq:B-RR}. In particular, for this case we have a linear $B$ with
\begin{align}
B_L &= \begin{bmatrix}
1 & -1 & 0 & 0 & 0\\
0 & 0 & 0 & -1 & 1
\end{bmatrix}\in\R^{M_E \times \bar{M}},\quad
B_b = \begin{bmatrix}0 \\ 0\end{bmatrix}\label{eq:B-example-A1}
\end{align}
\item To extend $rI_M$ to $L_2$'s boundary, we simply pad a column of zeros to each end of the identity operator:
\begin{align}
I^E &\equiv \begin{bmatrix}
0 & 1 & 0 & 0 & 0\\
0 & 0 & 1 & 0 & 0\\
0 & 0 & 0 & 1 & 0
\end{bmatrix}
\end{align}
\item The composed stencil is then
\begin{align}
L &= rI^E - L_2\\
&= \begin{bmatrix}
-1 & 2 + r & -1 & 0 & 0\\
0 & -1 & 2+r & -1 & 0\\
0 & 0 & -1 & 2+r & -1
\end{bmatrix}\label{eq:L-example1}
\end{align}
\end{itemize}
\subsubsection{Solving the Stacked Equation}
% Note that if this matrix is singular, then the problem is not well-specified and no solutions exist? Given the solution, we can define the restriction operator to remove one from the first and one from the last as,
% \begin{align}
% R &\equiv E_{11}^{\top} = \begin{bmatrix}\textbf{0}_M & I_ M & \textbf{0}_M\end{bmatrix}\in\R^{M\times \bar{M}}
% \intertext{Then find the appropriate value as,}
% u &= R \bar{u}
% \intertext{These could be combined,}
% u &= R \begin{bmatrix} L \\ B
% \end{bmatrix}^{-1} \begin{bmatrix} p \\ b \end{bmatrix}
% \end{align}
% Note: the extension and restriction operators are transposes in this case? Also, note that some sort of pseudo-inverse of the $R$ matrix is its transpose. That is $R R^{\top} = I$.
Substituting $L$, $p$ and $B$ into the stacked extended equation \cref{eq:extended-stack}, we get
\begin{align}
\begin{bmatrix}
-1 & 2 + r & -1 & 0 & 0\\
0 & -1 & 2+r & -1 & 0\\
0 & 0 & -1 & 2+r & -1\\
1 & -1 & 0 & 0 & 0\\
0 & 0 & 0 & -1 & 1
\end{bmatrix} \bar{u} &= \begin{bmatrix} p_1 \\ p_2 \\ p_3 \\ 0 \\ 0\end{bmatrix}\label{eq:stacked-example-1}
\end{align}
We will now show that the rows corresponding to $B$ can be used in Gaussian elimination to reduce the system to one defined in the interior. First, add the 4th row to the first row to get
\begin{align}
\begin{bmatrix}
1-1 & -1+2+r & -1 & 0 & 0\\
0 & -1 & 2+r & -1 & 0\\
0 & 0 & -1 & 2+r & -1\\
1 & -1 & 0 & 0 & 0\\
0 & 0 & 0 & -1 & 1
\end{bmatrix} \bar{u} &= \begin{bmatrix} p_1 \\ p_2 \\ p_3 \\ 0 \\ 0\end{bmatrix}
\intertext{Next, add the 5th row to the 3rd row and simplify,}
\begin{bmatrix}
0 & 1+r & -1 & 0 & 0\\
0 & -1 & 2+r & -1 & 0\\
0 & 0 & -1 & 1+r & 0\\
1 & -1 & 0 & 0 & 0\\
0 & 0 & 0 & -1 & 1
\end{bmatrix} \begin{bmatrix}\bar{u}(-1)\\ \bar{u}(0) \\ \bar{u}(1)\\ \bar{u}(2) \\ \bar{u}(3) \end{bmatrix}
&= \begin{bmatrix} p_1 \\ p_2 \\ p_3 \\ 0 \\ 0\end{bmatrix}
\intertext{Notice that we have eliminated the extension nodes from all of the equations involving $L$. Consequently, we can just take out the sub-matrix between columns 2--4 and rows 1--3 to get}
\begin{bmatrix}
1+r & -1 & 0\\
-1 & 2+r & -1\\
0 & -1 & 1+r
\end{bmatrix}u
&= \begin{bmatrix} p_1 \\ p_2 \\ p_3\end{bmatrix}\label{eq:reduced-example-1}
\end{align}
On the other hand, for the reflecting boundaries, we know from \cref{eq:Q-RR} the boundary extrapolation operator is
\begin{align}
Q^B &\equiv \begin{bmatrix}
1 & 0 & 0\\
1 & 0 & 0\\
0 & 1& 0\\
0 & 0& 1\\
0 & 0& 1
\end{bmatrix}\label{eq:Q-example}
\end{align}
and the associated discretized equation on the interior is $LQ^Bu = p$. It is easy to check that plugging $L$ from \cref{eq:L-example1} and $Q^B$ from \cref{eq:Q-example} also gives \cref{eq:reduced-example-1}, which proves the equivalence between the two equations.
\subsection{Example 2: Diffusion and Drift with Reflecting Boundaries}\label{sec:appendixA-example2}
\subsubsection{Continuous Equation}
Let's add a drift term to \ref{sec:appendixA-example1} with constant rate $\mu < 0$ and keep everything else the same. The stochastic process is now $d x_t = \mu d t + \sqrt{2} d W_t$ and the corresponding stationary HJBE becomes
\begin{align}
\tilde{L} u(x) &= \tilde{p}(x)\\
\tilde{L} &\equiv r\tilde{I} - \D[xx] - \mu \D[x]\\
\D[x]\tilde{u}(0) &= \D[x]\tilde{u}(2) = 0
\end{align}
\subsubsection{Discretized Equation}
Again we follow the procedure of \ref{sec:general-composite} for the discretization of $\tilde{L}$. The biggest common extended domain is still the one used by $L_2$ so the parts regarding $rI^E$, $L_2$ and $B$ in \ref{sec:appendixA-example1} remain the same.
For $\D[x]$, because $\mu < 0$ backward difference should be used and that gives the stencil operator $L^-_1$ \cref{eq:L-1}, which in this case is
\begin{align}
L^-_1 &= \begin{bmatrix}
-1 & 1 & 0 & 0 \\
0 & -1 & 1 & 0 \\
0 & 0 & -1 & 1
\end{bmatrix}
\end{align}
The extended domain for $L^-_1$ only needs a boundary point at the bottom. We need to extend it to the whole domain, which includes one extra node at the top end. This can be achieved by padding a column of zeros to $L^-_1$:
\begin{align}
L^{-E}_1 &= \begin{bmatrix}
-1 & 1 & 0 & 0 & 0\\
0 & -1 & 1 & 0 & 0\\
0 & 0 & -1 & 1 & 0
\end{bmatrix}
\end{align}
Finally, the composed operator is
\begin{align}
L &= rI^E - L_2 - \mu L^{-E}_1 \\
&= \begin{bmatrix}
-1 + \mu & 2 -\mu + r & -1 & 0 & 0\\
0 & -1 + \mu & 2 - \mu +r & -1 & 0\\
0 & 0 & -1 + \mu & 2 - \mu +r & -1
\end{bmatrix}
\end{align}
and the stacked equation \cref{eq:extended-stack} is
\begin{align}
\begin{bmatrix}
-1 + \mu & 2 -\mu + r & -1 & 0 & 0\\
0 & -1 + \mu & 2 - \mu +r & -1 & 0\\
0 & 0 & -1 + \mu & 2 - \mu +r & -1\\
1 & -1 & 0 & 0 & 0\\
0 & 0 & 0 & -1 & 1
\end{bmatrix} \bar{u} &= \begin{bmatrix} p_1 \\ p_2 \\ p_3 \\ 0 \\ 0\end{bmatrix}
\end{align}
\subsubsection{Solving the Stacked Equation}
Again we solve the stacked equation \cref{eq:extended-stack} using Gaussian elimination on the $B$ rows. First add $(1-\mu)$ times row 4 to row 1, and then add row 5 to row 3. This gives
\begin{align}
\begin{bmatrix}
0 & 1 + r & -1 & 0 & 0\\
0 & -1 + \mu & 2 - \mu +r & -1 & 0\\
0 & 0 & -1 + \mu & 1 - \mu +r & 0\\
1 & -1 & 0 & 0 & 0\\
0 & 0 & 0 & -1 & 1
\end{bmatrix} \bar{u} &= \begin{bmatrix} p_1 \\ p_2 \\ p_3 \\ 0 \\ 0\end{bmatrix}
\end{align}
Extract the interior of the matrix to get
\begin{align}
\begin{bmatrix}
1 + r & -1 & 0\\
-1 + \mu & 2 - \mu +r & -1\\
0 & -1 + \mu & 1 - \mu +r
\end{bmatrix} u &= \begin{bmatrix} p_1 \\ p_2 \\ p_3\end{bmatrix}\label{eq:reduced-example-2}
\end{align}
The $Q^B$ in this example is the same as \cref{eq:Q-example}. It is easy to check that the interior equation $LQ^Bu = p$ also gives \cref{eq:reduced-example-2}, again confirming that the extended equation gives the same results.
\subsection{Example 3: Diffusion with Inhomogeneous Boundaries}\label{sec:appendixA-example3}
\subsubsection{Continuous Equation}
Let's consider \ref{sec:appendixA-example1} again but change the boundary conditions to be inhomogeneous. The stationary HJBE:
\begin{align}
\tilde{L}u(x) &= \tilde{p}(x)\\
\tilde{L} &\equiv r\tilde{I} - \D[xx] \\
\D[x]\tilde{u}(0) &= b^{\min}\\
\D[x]\tilde{u}(2) &= b^{\max}
\end{align}
\subsubsection{Discretized Equation}
The discretized $L$ and $p$ are the same as \ref{sec:appendixA-example1}. Since the boundary conditions are now inhomogeneous $B$ is no longer linear. Recalling \cref{eq:B-RR}, for this example we have
\begin{align}
B_L = \begin{bmatrix}
1 & -1 & 0 & 0 & 0\\
0 & 0 & 0 & -1 & 1
\end{bmatrix}\in\R^{M_E \times \bar{M}}\quad
B_b = \begin{bmatrix}b^{\min}\\-b^{\max}\end{bmatrix}
\end{align}
and the stacked equation \cref{eq:extended-stack} is now
\begin{align}
\begin{bmatrix}
-1 & 2 + r & -1 & 0 & 0\\
0 & -1 & 2+r & -1 & 0\\
0 & 0 & -1 & 2+r & -1\\
1 & -1 & 0 & 0 & 0\\
0 & 0 & 0 & -1 & 1
\end{bmatrix} \bar{u} &= \begin{bmatrix} p_1 \\ p_2 \\ p_3 \\ -b^{\min} \\ b^{\max}\end{bmatrix}
\end{align}
\subsubsection{Solving the Stacked Equation}
Since the left hand side coefficient matrix is the same, we can use the same Gaussian elimination procedure as \ref{sec:appendixA-example1}. This gives the reduced equation
\begin{align}
\begin{bmatrix}
1+r & -1 & 0\\
-1 & 2+r & -1\\
0 & -1 & 1+r
\end{bmatrix}u
&= \begin{bmatrix}
p_1 - b^{\min}\\ p_2 \\ p_3 + b^{\max}
\end{bmatrix}\label{eq:reduced-example-3}
\end{align}
For the $LQ^Bu = p$ route, $Q^B$ is now affine and from \cref{eq:Q-RR} we have
\begin{align}
Q^B_L = \begin{bmatrix}
1 & 0 & 0 \\
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1 \\
0 & 0 & 1
\end{bmatrix}\quad
Q^B_b = \begin{bmatrix}
-b^{\min}\\0\\0\\0\\b^{\max}
\end{bmatrix}
\end{align}
and the equation is
\begin{align}
LQ^B_Lu &= p - LQ^B_b
\end{align}
substituting $L$, $p$, $Q^B_L$ and $Q^B_b$, we get back \cref{eq:reduced-example-3}, again proving the equivalence.
\section{Affine Relations and Intuition}
The following provides intuition on the relationships above:\footnote{\textbf{TODO: Fernando/Steven} I think you will need to rewrite this with the modified notation and go through it carefully. I don't quite get it, and the notation was slightly different than the rest of the text... Also, I think that abusing notation for the discretized and non-discretized is part of the problem. We might want to rewrite this a little after the expanding operator setup is done.}
\begin{itemize}
\item Define $S_E$ as the set of non interior grid point indexes, e.g, if we have a univariate problem with just three non-interior point, let's say, the first one and the last two grid points, then $M_E = 3$ and $S_E = \{1,\bar{M}-1,\bar{M}\}$\footnote{Notice that, by construction, the number of elements in $S_E$ is always $M_E$}.
\item Now let's focus on solving an expression like $\bar{u} = A \bar{u}$ where $A$ is affine.\footnote{\textbf{Fernando/Steven}: Is this a particular operator you have in mind from our setup, or a general affine operator you are going through? Point it out from above, and differentiate the $\tilde{A}$ from the discretized $A$} Consider, with some abuse of notation, that such expression means both the discretized and non-discretized forms. Define, for any arbitrary matrix J with at least $M_E$ columns, that $J[:,S_E]$ is a submatrix whose columns are the concatenated vectors $J[:,s]$, $\forall s \in S_E$.
Then we have two relations\footnote{We could not find a way to clearly show two relations above are correct, but some intuitions are provided in the text}:
%
\begin{align}
A [:,S] \left(B[:,S_E]^{-1} b \right) = A Q_b\label{affine_relation_1}\\
(A -A [:,S_E] (B[:,S_E]^{-1} B)) R^{\top} = A Q_L\label{affine_relation_2}
\end{align}
Our intuition is that the main idea here is using interiors to recover a relation that boundary conditions should satisfy. Since $Q_b$ is a length $\bar{M}$ vector containing zeros except at the two ends, the two non-zero elements in $Q_b$ capture partial information of boundary nodes (the part that is ``independent'' of interiors).\footnote{\textbf{Typo From Chris}
\cref{affine_relation_1} is actually defined from \cref{affine_relation_2} which is defined from the next one. Looking at it like that, it's clear to see the error
since 89 is just saying
$(A - AQb)*R^T = AQL$
substitute in 88 for AQb)
you see the substitution was done incorrectly
has a big B instead of a little b.
}
Recall \cref{eq:B_operator_block}, so $\left(B[:,S_E]^{-1} b \right)$ recovers the ``independent'' part of boundary nodes. Then it is reasonable to expect that \cref{affine_relation_1} holds.
Multiply both sides of \cref{affine_relation_2} by $\bar{u}$, we can roughly rewrite the relation as
\begin{equation}
(A \bar{u}-A Q_b) R^{\top} = A Q_L u
\end{equation}
so $A \bar{u}-A Q_b$ will be a discretized $\bar{u}$ which contains the entire information of the interior and the remaining part of boundary information that is not covered by $A Q_b$.
However, we are not sure if $R$ should exist on the left of \cref{affine_relation_2} since R by definition is a restriction operator and $(A \bar{u}-A Q_b) R^{\top}$ only contains information from interiors.
% % we already solved this by changing R place in the equation. It seems to be right, since Steven reported that now both affine relationships hold for the examples
% Also the size of the LHS of \cref{affine_relation_2} is $M\times \bar{M}$, but the size of the RHS is $\bar{I}\times I$ .
%
For now, while we work on better understanding those expressions, we will take them as given.
Given those relationships, in order to solve the differential equation, we now only have to solve for the interior. Otherwise, including the boundary values would imply having more points than there are degrees of freedom in the problem - thus making the numerical solution unstable. Moreover, the boundaries are given directly by the interior $\bar{u} = Q u$.
Therefore, we actually want to solve $\bar{u} = A Q u$. Notice that the discretized A maps from the full domain to the interior\footnote{Notice that the PDE is only defined on the interior}. Notably, that means it's not square. Additionally, consider that, as described above, since $Q$ is in general affine, thus:
\begin{equation}
\bar{u} = A Q_L u + A Q_b
\end{equation}
Those are the linear equations which define the ODE or whose solution is the solution to the PDE.
\end{itemize}
\end{document}
% Algebra has some issues. B, R and Q are wrong.
% \section{The multivariate case}
% \paragraph{Grids}Consider the multivariate function of $x = (x^1,...,x^n )$:
% \begin{itemize}
% \item Consider we have $\bar{I}_j$ points, $\set{x^j_i}_{i=1}^{\bar{I}_j}$%I think here you mean \bar{I}_j instead of \bar{I}.
% with $x^j_1 = x^{\min}^j$ and $x^j_I = x^{\max}^j$ when $x^j \in [x^{\min}^j, x^{\max}^j]$ for each $j \in \{1,...,n\}$. After discretizing, we will often denote the grid with the variable name, i.e. $x^j \equiv \set{x^j_i}_{i=1}^{\bar{I}_j}$
% In the simple case of a uniform grid, $\Delta^j \equiv x_{i+1}^j - x_i^j$ for all $i < \bar{I}^j$.
% \item When we discretize a function, use the function name without arguments to denote the vector. i.e. $\bar{u}(x)$ discretized on a grid $\set{x_i}_{i=1}^{\bar{I}}$ is $\bar{u} \equiv \set{\bar{u}(x_i)}_{i=1}^{\bar{I}} \in \R^{\bar{I}}$, where $\bar{I} = \sum_{j=1}^n \bar{I}_j$ and $I = \sum_{j = 1}^n I_j$.
% Use $i=1,... I_j$ as the grid in the interior. Moreover, define $I_{j,L}$ and $I_{j,H}$ as the non interior grid points. Therefore, $\bar{I}_j = I_j + I_{j,L} + I_{j,H}$. % Again, I think you mean I_j's in above cases.
% Notice that the solution to the discretized function is $u \in R^{\bar{I}}$ if stationary.
%
% We construct the discretized $\bar{u}$ by column stacking the discretized $\bar{u}_{x^j}$ on each grid $j \in \{1,...,n\}$:
% \begin{equation}
% \bar{u} = \begin{bmatrix}
% \bar{u}_{x^1}\\
% \bar{u}_{x^2}\\
% \vdots\\
% \bar{u}_{x^n}\\
% \end{bmatrix}\label{u_def}
% \end{equation}
%
%
%
% \item For any arbitrary variable $y(x)$ defined in the whole space of $x$, we define $\bar{y}$ as its discretization on the whole domain of $x$ and $y$ as the discretization only for interior points of the domain. So while $\bar{y}$ has length $\bar{I}$, $y$ has length $I$.
% \end{itemize}
%
% \subsection{General Overview of Discretization and Boundary Values}\label{sec:general}
% \textbf{TODO:} Explain the $R, Q, B, A$ etc. for the general notation from \url{https://github.com/JuliaDiffEq/DifferentialEquations.jl/issues/260}. The idea here is to make sure we understand everything about the ghost nodes, boundary values, etc.
% \begin{itemize}
% \item A is defined as the discretization of the partial differential operator in use.
%
% \item B is the boundary condition operator. For $N$ dimensions, we can define B as a block matrix in the form:
% \begin{equation}
% B\cdot \bar{u} \equiv
% %\begin{bmatrix}
% %\text{B}_{x^{1},L} & ... & \text{B}_{x^{N},L}\\
% %\text{B}_{x^{2},H} & ... & \text{B}_{x^{N},H}
% %\end{bmatrix}
% \begin{bmatrix}
% \text{B}_{x^{1},L} & \mathbf{0}_{1,L} & ... & \mathbf{0}_{1,L}\\
% \text{B}_{x^{1},H} & \mathbf{0}_{1,H} & ... & \mathbf{0}_{1,H}\\
% \mathbf{0}_{2,L} & \text{B}_{x^{2},L} & ... & \mathbf{0}_{2,L}\\
% \mathbf{0}_{2,H} & \text{B}_{x^{2},H} & ... & \mathbf{0}_{2,H}\\
% \vdots & \vdots & \ddots & \vdots \\
% \mathbf{0}_{N,L} & ... & \mathbf{0}_{N,L} & \text{B}_{x^{N},L} & \\
% \mathbf{0}_{N,H} & ... & \mathbf{0}_{N,H} & \text{B}_{x^{N},H} & \\
% \end{bmatrix}
% \bar{u}
% =
% \begin{bmatrix}
% \text{z}_{{1},L}\\
% \text{z}_{{1},H}\\
% \text{z}_{{2},L}\\
% \text{z}_{{2},H}\\
% \vdots\\
% \text{z}_{{N},L}\\
% \text{z}_{{N},H}\\
% \end{bmatrix}
% %\begin{bmatrix}
% %\text{z}_{1, L}&\text{z}_{2, L}&\dots&\text{z}_{n, L}\\
% %\text{z}_{1, H}&\text{z}_{2, H}&\dots&\text{z}_{n, H}
% %\end{bmatrix}
% \label{eq:B_operator_block}
% \end{equation}
% for any $\bar{u}$ in the space of functions that satisfy the boundary conditions and where B$_{x^j,k}$ is a block matrix satisfying B$_{x^j,k}\cdot \bar{u}_{x^j} = \text{z}_{j,k}$ for $k \in \{L,H\}$ and $j = 1, 2,\dots n$. The size of B$_{x^j,k}$ is $I_{j,k} \times \bar{I}_j$.
%
% Notice that B is not necessarily unique. The choice of B is exactly the choice of boundary value discretization. For instance, choosing to do first or second order Neumann border conditions is simply the choice of the operator B. The size of $B$ is $(I_A + I_H) \times \bar{I}$.
%
% \item R is the restriction operator which is defined by the domain. It removes columns which are not in the interior.
% For an univariate process, we have:
% \begin{equation}
% R\cdot \bar{u} \equiv\set{u(x_j)}_{j \in \{1,...,I\}} \in \R^{I} \label{eq:R_operator}
% \end{equation}
% Notice that R has size $I \times \bar{I}$ and R is defined as
% \begin{equation}
% R_j = %\underbrace{
% \begin{bmatrix}
% 0&\dots&0&1&0&\dots&0&0&\dots&0\\
% 0&\dots&0&0&1&\dots&0&0&\dots&0\\
% \vdots&\ddots&\vdots&\vdots&\vdots&\ddots&\vdots&\vdots&\ddots&\vdots\\
% 0&\dots&0&0&0&\dots&1&0&\dots&0
% \end{bmatrix}%}_{I_{j, L}\qquad+\qquad I_j\qquad+\qquad I_{j, H}}
% \end{equation}
% \hspace{5.1cm} \begin{tikzpicture}[every text node part/.style={align=center}]
%
% \draw[black, thick, decoration = {brace,mirror,raise=0cm},decorate] (8,0) -- node[below=6pt] {$I_{j,L}$} (9.6,0);
%
% \draw[black, thick, decoration = {brace,mirror,raise=0cm}, decorate]
% (10,0) -- node[below=6pt] {$I_{j}$} (12.1,0);
%
% \draw[black, thick, decoration = {brace,mirror,raise=0cm},decorate] (12.5,0) -- node[below=6pt] {$I_{j,H}$} (14.1,0);
%
% \end{tikzpicture}
%
% \item Q is the operator that is defined as
% \begin{equation}
% Q \cdot R\cdot\bar{u} = \bar{u}\label{eq:Q_operator_1}
% \end{equation}
% for any $\bar{u}$ that satisfies the border conditions. (Notice that $Q = R^{-1}$ if R is square, and this is only true as maps on functions which satisfy the boundary). Formally, we define $Q$ as a matrix of size $\bar{I} \times I$ given by:
% \begin{equation}
% Q = \begin{bmatrix}
% Q_1 & \mathbf{0}_1 & ... & \mathbf{0}_1\\
% \mathbf{0}_2 & Q_2 & ... & \mathbf{0}_2\\
% \vdots & \vdots & \ddots & \vdots \\
% \mathbf{0}_n & \mathbf{0}_n & ... & Q_n\\
% \end{bmatrix}\label{Q_def}
% \end{equation}
% where, for any arbitrary $j$, $Q_j$ is a partitioned matrix of size $\bar{I_j} \times I_j$ given by:
% \begin{equation}
% Q_j = \begin{bmatrix}
% & & Q_{j,L} & & \\
% & & \mathbb{I}_{(I_j \times I_j)} \\
% & & Q_{j,H} & & \\
% \end{bmatrix}\label{Qj_def}
% \end{equation}
% whose submatrices $Q_{j,L}$ and $Q_{j,H}$ (of size $I_{j,L} \times I_j$ and $I_{j,H} \times I_j$, respectively) elements depend on the border conditions and the submatrix $\mathbb{I}_{(I_j\times I_j)}$ is an identity matrix of dimension $I_j$.
%
% Q also has a second relation:
% \begin{equation}
% B\cdot Q\cdot u =
% %\begin{bmatrix}
% %\text{B}_{x^{1},L} & ... & \text{B}_{x^{N},L}\\
% %\text{B}_{x^{2},H} & ... & \text{B}_{x^{N},H}
% %\end{bmatrix}
% \begin{bmatrix}
% \text{B}_{x^{1},L} & \mathbf{0}_{1,L} & ... & \mathbf{0}_{1,L}\\
% \text{B}_{x^{1},H} & \mathbf{0}_{1,H} & ... & \mathbf{0}_{1,H}\\
% \mathbf{0}_{2,L} & \text{B}_{x^{2},L} & ... & \mathbf{0}_{2,L}\\
% \mathbf{0}_{2,H} & \text{B}_{x^{2},H} & ... & \mathbf{0}_{2,H}\\
% \vdots & \vdots & \ddots & \vdots \\
% \mathbf{0}_{N,L} & ... & \mathbf{0}_{N,L} & \text{B}_{x^{N},L} & \\
% \mathbf{0}_{N,H} & ... & \mathbf{0}_{N,H} & \text{B}_{x^{N},H} & \\
% \end{bmatrix}
% \bar{u}
% =
% \begin{bmatrix}
% \text{z}_{{1},L}\\
% \text{z}_{{1},H}\\
% \text{z}_{{2},L}\\
% \text{z}_{{2},H}\\
% \vdots\\
% \text{z}_{{N},L}\\
% \text{z}_{{N},H}\\
% \end{bmatrix}
% %\begin{bmatrix}
% %\text{z}_{1, L}&\text{z}_{2, L}&\dots&\text{z}_{n, L}\\
% %\text{z}_{1, H}&\text{z}_{2, H}&\dots&\text{z}_{n, H}
% %\end{bmatrix}
% %\end{bmatrix}