import math
import itertools
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
Consider a stochastic process $ \{ W_t \}_{t\in [0,T]} $. We will build it from repeated coin tosses, where each toss is independent.
$ w=w_1 w_2 w_3 \dots $, where $w_i$ is the outcome of the $i^{\text{th}}$ coin toss, so $w_i \in \{H, T\}$ (head or tail).
$\begin{equation} X_{i} = \begin{cases} 1 & \text{if $w_i=H$}\\ -1 & \text{if $w_i=T$}\\ \end{cases} \end{equation}$
We define the cumulative result of the coin tosses as $M_k$:
$$M_k=\sum_{j=1}^k X_j$$
M = 10000 # number of simulations
t = 10 # Time
random_walk = [-1, 1]
steps = np.random.choice(random_walk, size = (M, t)).T
# each simulation generates one row of data with t values (one per time step),
# so the total number of random choices is M*t
# '.T' transposes the matrix so that each column is one simulation path
origin = np.zeros((1, M))
# every path starts from zero, so create a row of zeros to prepend to the paths
rw_paths = np.concatenate( [origin, steps] ).cumsum(axis=0)
# concatenate the origin row with the steps, then take the cumulative sum down each column (axis=0)
plt.plot(rw_paths)
plt.xlabel('Years (t)')
plt.ylabel('Move')
plt.show()
The figure above shows the simulated paths of $M_k$ for $k = 0, 1, \dots, t$, one path per simulation (M paths in total).
For any times $ 0=k_0<k_1<k_2<\dots<k_m $, the increments
$ (M_{k_1}-M_{k_0}),\ (M_{k_2}-M_{k_1}),\ \dots,\ (M_{k_m}-M_{k_{m-1}}) $
are independent, with
$\mathbb{E}(M_{k_{i+1}}-M_{k_{i}})=0$
$\mathrm{Var}(M_{k_{i+1}}-M_{k_{i}})=k_{i+1}-k_i$
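These properties can be checked numerically from the simulated paths. A minimal sketch, assuming `rw_paths` (shape `(t+1, M)`) from the cell above; the time indices `k_i = 2` and `k_j = 7` are only illustrative:
# Empirical check of the increment properties above (illustrative indices k_i = 2, k_j = 7)
k_i, k_j = 2, 7
increments = rw_paths[k_j] - rw_paths[k_i]  # one increment per simulated path
print(np.mean(increments))  # expected to be close to 0
print(np.var(increments))   # expected to be close to k_j - k_i = 5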
The random walk is a martingale: the conditional expectation of its value at a future time, given all information up to the current time, equals its current value.
$\mathbb{E}[\ \cdot \ |\mathcal{F}_t]$ denotes the conditional expectation given all the information up to time $t$. Then,
$$\begin{align} \mathbb{E}[M_{t+k} |\mathcal{F}_t] &= \mathbb{E}[M_{t+k}-M_{t}+M_{t} |\mathcal{F}_t] \\ &= \mathbb{E}[M_{t+k}-M_{t}|\mathcal{F}_t]+\mathbb{E}[M_{t} |\mathcal{F}_t]\\ &= \mathbb{E}[M_{t+k}-M_{t}|\mathcal{F}_t]+M_{t}\\ &= M_t \end{align}$$
Quadratic variation: $ \sum_{i=1}^k (M_i - M_{i-1})^2=k $
# Create Quadratic variation and Variance functions
quadratic_variation = lambda x: round(np.square(x[:-1]-x[1:]).sum(),3)
variance = lambda x: round(np.var(x,axis=0),3)
[quadratic_variation(path) for path in rw_paths.T]
# Increase the number of simulations (e.g. to 10,000,000) to observe the variance at each step converging to the time index
[variance(path) for path in rw_paths[1:11]]
[1.0, 2.005, 3.023, 4.0, 5.034, 6.015, 7.084, 7.994, 8.941, 9.95]
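The martingale property derived above can also be illustrated numerically: among paths that share the same value at an intermediate time, the average value at a later time should be approximately that shared value. A minimal sketch, assuming `rw_paths` from the simulation above; the choice of times 5 and 10 is only illustrative:
# Numerical illustration of E[M_{t+k} | F_t] = M_t (illustrative times 5 and 10)
t_now, t_later = 5, 10
for m_t in np.unique(rw_paths[t_now]):
    later_values = rw_paths[t_later][rw_paths[t_now] == m_t]
    print(m_t, later_values.mean())  # conditional average should be close to m_t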
# Parameters
M = 10 # number of simulations
t = 10 # Time
n = 100
random_walk = [-1, 1]
steps = (1/np.sqrt(n)) * np.random.choice(random_walk, size=(M,t*n)).T
origin = np.zeros((1,M))
srw_paths = np.concatenate([origin, steps]).cumsum(axis=0)
time = np.linspace(0,t,t*n+1)
tt = np.full(shape=(M, t*n+1), fill_value=time)
tt = tt.T
# print(np.size(tt),np.size(srw_paths))
plt.plot(tt,srw_paths)
plt.xlabel("Years (t)")
plt.ylabel("Move")
plt.show()
# Increase the number of simulations (e.g. to 100,000) to observe the variance at each step converging to the corresponding time
[variance(path) for path in srw_paths[1:11]]
[quadratic_variation(path) for path in srw_paths.T[:4]]
[10.0, 10.0, 10.0, 10.0]
# Change the parameter n to see the impact of increasing the discretization
# of the random walk and how it converges to the normal distribution
n = 10
t = 10
# Combinations
def nCr(n, k):
    f = math.factorial
    return f(n) / (f(k) * f(n - k))
perms = [nCr(n*t,k)*(0.5)**(n*t) for k in range(int(n*t)+1)]
W_nt = lambda n,t: 1/np.sqrt(n) * np.arange(-n*t,n*t+1,2)
outcomes = W_nt(n,t)
plt.bar(outcomes, [perm/(outcomes[1]-outcomes[0]) for perm in perms], outcomes[1]-outcomes[0],
        label='{0} scaled RW'.format(n))
x = np.linspace(-3*np.sqrt(t), 3*np.sqrt(t), 100)
plt.plot(x, stats.norm.pdf(x, 0, np.sqrt(t)), 'k-',label='normal dist')
plt.xlim(-3*np.sqrt(t),3*np.sqrt(t))
plt.ylabel("Probability")
plt.xlabel("Move")
plt.legend()
plt.show()
By the Central Limit Theorem, the scaled random walk $ W^{(n)}(t)=\frac{1}{\sqrt{n}}M_{nt} $ converges in distribution: $ \lim_{n\rightarrow \infty} W^{(n)}(t)\sim N(0,t) $.
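The same convergence can also be seen by Monte Carlo: the terminal values of the scaled random-walk paths should be approximately $N(0,t)$. A minimal sketch, assuming `srw_paths` and `t` from the scaled-random-walk cell above (increase M there for a smoother histogram):
# Histogram of the terminal values of the scaled random walk vs. the N(0, t) density
terminal = srw_paths[-1]  # W^{(n)}(t) for each simulated path
plt.hist(terminal, bins=30, density=True, label='terminal values')
x = np.linspace(-3*np.sqrt(t), 3*np.sqrt(t), 100)
plt.plot(x, stats.norm.pdf(x, 0, np.sqrt(t)), 'k-', label='normal dist')
plt.legend()
plt.show()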
Recall that a Brownian motion is a stochastic process $ \{ W_t \}_{t\in [0,T]} $ with
$ W_t \sim N(0,t) $
and independent increments satisfying
$ \mathbb{E}(W_{t_{i+1}}-W_{t_i})=0 $
$ \mathrm{Var}(W_{t_{i+1}}-W_{t_i})= t_{i+1} -t_i $
# Parameters
M = 10 # number of simulations
t = 10 # Time
n = 100 # number of time steps
dt = t/n # time step
# sample the increments directly from the normal distribution
steps = np.random.normal(0, np.sqrt(dt), size=(M, n)).T
origin = np.zeros((1,M))
bm_paths = np.concatenate([origin, steps]).cumsum(axis=0)
time = np.linspace(0,t,n+1)
tt = np.full(shape=(M, n+1), fill_value=time)
tt = tt.T
plt.plot(tt,bm_paths)
plt.xlabel("Years (t)")
plt.ylabel("Move")
plt.show()
# Increase the number of time steps (e.g. to 1,000,000) to observe the same quadratic variation along each path
[quadratic_variation(path) for path in bm_paths.T[:4]]
# Increase the number of simulations (e.g. to 100,000) to observe the variance at each time step converging to the corresponding time
[variance(path) for path in bm_paths[1:11]]
[0.12, 0.167, 0.224, 0.161, 0.284, 0.409, 0.444, 0.66, 0.694, 0.932]
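Finally, the marginal distribution $W_t \sim N(0,t)$ can be checked from the simulated terminal values. A minimal sketch, assuming `bm_paths` and `t` from the Brownian-motion cell above (increase M there for a more reliable test):
# Check the terminal values against N(0, t)
terminal = bm_paths[-1]
print(terminal.mean(), terminal.var())  # expected to be close to 0 and t
print(stats.kstest(terminal / np.sqrt(t), 'norm'))  # KS test against the standard normal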
by Fanyu Zhao