\documentclass{article}
\usepackage{amssymb}
\usepackage{amsmath}
%\usepackage{slide-article-tom}
\usepackage[landscape]{slide-article-tom}
\ifx\pdfoutput\undefined
\usepackage[dvips]{graphicx}
\else
\usepackage[pdftex]{graphicx}
\pdfcompresslevel9
\fi
\usepackage[all]{xy}
\setlength{\unitlength}{1cm}
%\usepackage{cyrillic}
%\newcommand{\cyrrm}{\fontencoding{OT2}\selectfont\textcyrup}
%\newcommand{\cyrit}{\fontencoding{OT2}\selectfont\textcyrit}
%\newcommand{\cyrsl}{\fontencoding{OT2}\selectfont\textcyrsl}
%\newcommand{\cyrsf}{\fontencoding{OT2}\selectfont\textcyrsf}
%\newcommand{\cyrbf}{\fontencoding{OT2}\selectfont\textcyrbf}
%\newcommand{\cyrsc}{\fontencoding{OT2}\selectfont\textcyrsc}
%%%% cyrrm = "Roman", or really upright, normal font
%%%% cyrit = Italic (cursive forms of letters)
%%%% cyrsl = Italic (non-cursive forms of letters)
%%%% cyrsf = Sans-serif
%%%% cyrbf = Bold-face
%\usepackage[OT2,T1]{fontenc}
%\DeclareSymbolFont{cyrletters}{OT2}{wncyr}{m}{n}
%\DeclareMathSymbol{\Sha}{\mathalpha}{cyrletters}{"58}
\usepackage{fixltx2e}
\usepackage{wrapfig}
\usepackage{amsthm}
\newtheorem*{mydef}{Definition}
\theoremstyle{definition}
\usepackage[OT2,T1]{fontenc}
%\usepackage[T2A,T1]{fontenc}
% Old-style (plain-TeX) Cyrillic setup: cyracc.def provides the \cyracc
% accent/transliteration macros used with the "Washington" wncy* fonts.
\input cyracc.def
% Washington Cyrillic fonts at design size (10pt):
% wncyr = upright (roman), wncyb = bold, wncyi = italic.
\font\tencyr=wncyr10
\font\tencyb=wncyb10
\font\tencyi=wncyi10
% The same three faces scaled to 12pt (body-text size).
\font\twlcyr=wncyr10 at 12pt
\font\twlcyb=wncyb10 at 12pt
\font\twlcyi=wncyi10 at 12pt
% Large display sizes (24pt / 25pt / 26pt) for slide headings.
\font\cyrtwntyfr=wncyr10 at 24pt
\font\cybtwntyfr=wncyb10 at 24pt
\font\cyitwntyfr=wncyi10 at 24pt
\font\cyrtwntyfv=wncyr10 at 25pt
\font\cybtwntyfv=wncyb10 at 25pt
\font\cyitwntyfv=wncyi10 at 25pt
\font\cyrtwntysx=wncyr10 at 26pt
\font\cybtwntysx=wncyb10 at 26pt
\font\cyitwntysx=wncyi10 at 26pt
% Earlier experiments with the cmcyr Computer Modern Cyrillic family,
% and 12pt convenience switches, kept for reference:
%\font\etcmcyr=cmcyr17 at 24pt
%\font\etcmcyb=cmcyb17 at 24pt
%\font\etcmcyi=cmcyi17 at 24pt
%\newcommand\cyr{\twlcyr\cyracc}
%\newcommand\cyb{\twlcyb\cyracc}
%\newcommand\cyi{\twlcyi\cyracc}
% Convenience switches that select one of the large Cyrillic faces and turn
% on \cyracc accent handling.  Naming: twntyfr/fv/sx = 24/25/26pt;
% cyr/cyb/cyi = roman/bold/italic.  Each command is made robust with
% \MakeRobust so it can appear safely in moving arguments (section titles,
% the table of contents, captions, ...).
\newcommand\twntyfrcyr{\cyrtwntyfr\cyracc}
\MakeRobust{\twntyfrcyr}
\newcommand\twntyfrcyb{\cybtwntyfr\cyracc}
\MakeRobust{\twntyfrcyb}
\newcommand\twntyfrcyi{\cyitwntyfr\cyracc}
\MakeRobust{\twntyfrcyi}
\newcommand\twntyfvcyr{\cyrtwntyfv\cyracc}
\MakeRobust{\twntyfvcyr}
\newcommand\twntyfvcyb{\cybtwntyfv\cyracc}
\MakeRobust{\twntyfvcyb}
\newcommand\twntyfvcyi{\cyitwntyfv\cyracc}
\MakeRobust{\twntyfvcyi}
\newcommand\twntysxcyr{\cyrtwntysx\cyracc}
\MakeRobust{\twntysxcyr}
\newcommand\twntysxcyb{\cybtwntysx\cyracc}
\MakeRobust{\twntysxcyb}
\newcommand\twntysxcyi{\cyitwntysx\cyracc}
\MakeRobust{\twntysxcyi}
\usepackage{tikz}
\usetikzlibrary{backgrounds,calc,arrows,shapes.arrows}
\usepackage{calc}
\tikzset{
% Styles for the cube illustration drawn by \cube (defined below).
% MyCube installs an oblique 3D->2D projection via explicit x/y/z unit vectors.
%MyCube/.style={scale=1.0,x={(-.4cm,-0.3cm)},y={(0.6cm,-0.3cm)},z={(0.2cm,0.7cm)}},
MyCube/.style={scale=1.0,x={(-0.55cm,-0.3cm)},y={(1cm,0cm)},z={(0cm,1cm)}},
% Face colors for the three visible faces.
% Fixed: the color expressions previously ended with a dangling "!"
% (e.g. "green!70!"); the canonical xcolor form is "green!70", i.e. green
% mixed 70/30 with white -- same color, well-formed syntax.
MyFaceA/.style={green!70,draw=black},
MyFaceB/.style={blue!50,draw=black},
MyFaceC/.style={blue!30!green!30,draw=black}
}
\newcommand{\cube}[4]{%
% Draw a cube-like solid inside a tikzpicture (use with the MyCube style).
% Parameters #1,#2,#3 = coordinates of the center, #4 = length of edge.
% \sd = half the edge length.  \pgfmathsetmacro stores the result in a
% stable macro; the previous "\let\sd\pgfmathresult{}" idiom was fragile
% and left a stray empty group in the picture stream.
\pgfmathsetmacro{\sd}{0.5*#4}%
% Vertices.  A, B, E, F, G are exact cube corners; C, D, H are pulled
% inward by a factor 0.7 on some axes.
% NOTE(review): the 0.7 factors make the solid deliberately(?) non-cubic --
% presumably a perspective fudge for the drawing; confirm before "fixing".
\coordinate (A) at ({#1+\sd},{#2+\sd},{#3+\sd});
\coordinate (E) at ({#1+\sd},{#2+\sd},{#3-\sd});
\coordinate (B) at ({#1+\sd},{#2-\sd},{#3+\sd});
\coordinate (F) at ({#1+\sd},{#2-\sd},{#3-\sd});
\coordinate (C) at ({#1-0.7*\sd},{#2-0.7*\sd},{#3+\sd});
\coordinate (G) at ({#1-\sd},{#2-\sd},{#3-\sd});
\coordinate (D) at ({#1-0.7*\sd},{#2+\sd},{#3+\sd});
\coordinate (H) at ({#1-0.7*\sd},{#2+\sd},{#3-0.7*\sd});
% Paint the three visible faces, then stroke their outlines on top.
\fill[MyFaceA] (A)--(E)--(H)--(D)--cycle;
\fill[MyFaceB] (A)--(B)--(C)--(D)--cycle;
\fill[MyFaceC] (A)--(B)--(F)--(E)--cycle;
\draw (A)--(E)--(H)--(D)--cycle;
\draw (D)--(C)--(B)--(A);
\draw (B)--(F)--(E);
}
\usepackage[titles]{tocloft}
\setlength{\cftbeforesecskip}{-2ex}
\setlength{\cftbeforesubsecskip}{-2ex}
\usepackage{hyperref}
\hypersetup{colorlinks,
linkcolor=blue,
citecolor=blue,
%pdfpagemode=FullScreen
pdfpagemode=UseNone
}
%\newcommand\@linkcolor{red}
%\newcommand\@linkcolor{blue}
%\newcommand\@anchorcolor{black}
%\newcommand\@citecolor{red}
%\newcommand\@filecolor{cyan}
%\newcommand\@urlcolor{magenta}
%\newcommand\@menucolor{red}
%\newcommand\@pagecolor{red}
% \pagedone: marks the end of a slide/page; currently just a page break.
\newcommand\pagedone{\newpage}
% \tthdump: pass-through wrapper for material the tth (TeX-to-HTML)
% converter should skip; under LaTeX it simply expands to its argument.
\newcommand\tthdump[1]{#1}
% \sectionhead{title}: unnumbered, centered slide section heading with a
% back-link arrow to the "Contents" hypertarget, plus a manual toc entry
% (needed because the starred \section* adds none itself).
\newcommand\sectionhead[1]{\begin{center}\section*{#1 \hyperlink{Contents}{\hfil$\leftarrow$}}
\end{center}
\addcontentsline{toc}{section}{#1}
\vspace{-\baselineskip}
}
% \subsectionhead{title}: same as \sectionhead, at subsection level.
\newcommand\subsectionhead[1]{\begin{center}\subsection*{#1 \hyperlink{Contents}{\hfil$\leftarrow$}}
\end{center}
\addcontentsline{toc}{subsection}{#1}
\vspace{-\baselineskip}
}
% \cyrsubsectionhead: currently byte-identical to \subsectionhead; kept as a
% separate hook, presumably so Cyrillic titles can be handled specially later.
\newcommand\cyrsubsectionhead[1]{\begin{center}\subsection*{#1 \hyperlink{Contents}{\hfil$\leftarrow$}}
\end{center}
\addcontentsline{toc}{subsection}{#1}
\vspace{-\baselineskip}
}
% \exercises{title}: heading for an exercises slide; toc entry reads
% "title (exercises)".
% NOTE(review): "-~" typesets a hyphen followed by a non-breaking space;
% an en-dash ("--~") may have been intended -- confirm.
\newcommand\exercises[1]{\begin{center}\subsection*{#1 -~exercises
\hyperlink{Contents}{\hfil$\leftarrow$}}\end{center}
\addcontentsline{toc}{subsection}{#1 (exercises)}
\vspace{-\baselineskip}
}
% \quotesection{title}: centered heading that is itself a hypertarget named
% after the title, with a back-link arrow to the "The quotes" anchor.
\newcommand\quotesection[1]{\begin{center}{\LARGE\hypertarget{#1}
{#1}\hyperlink{The quotes}{\hfil$\twoheadleftarrow$}}\end{center}}
% --- Math notation macros ---------------------------------------------------
% \httimes / \htvec / \mybigtriangledown: indirection layer so the tth HTML
% converter can redefine these symbols; under LaTeX they are the plain ones.
\newcommand\httimes{\times}
\newcommand\mybigtriangledown{\bigtriangledown}
\newcommand\htvec[1]{\vec{#1}}
% Named operators.  \DeclareMathOperator (amsmath, loaded above) gives
% upright type *and* correct operator spacing, unlike the previous bare
% \mathrm versions.  Usage in math mode is unchanged.
\DeclareMathOperator{\myspan}{span}
\newcommand\mymatrix{\mathrm{matrix}}
% Standard number systems.
\newcommand\myreal{\mathbb{R}}
\newcommand\mycomplex{\mathbb{C}}
\newcommand\myfield{\mathbb{F}}
\newcommand\myquaternions{\mathbb{H}}
\newcommand\myrationals{\mathbb{Q}}
\newcommand\myintegers{\mathbb{Z}}
%\newcommand\dim\mathrm{dim}
\DeclareMathOperator{\im}{im}
\newcommand\deriv{\mathrm{D}}
\DeclareMathOperator{\trace}{trace}
% Subscripted variants of det (determinant "of a matrix" / "of vectors");
% \operatorname keeps operator spacing while allowing the subscript.
\newcommand\detm{\operatorname{det}_m}
\newcommand\detv{\operatorname{det}_v}
\DeclareMathOperator{\perm}{perm}
\DeclareMathOperator{\sign}{sign}
% kets and bras etc.
\newcommand\ket[1]{|{#1}\rangle}
\newcommand\bra[1]{\langle{#1}|}
\newcommand\braket[2]{\langle{#1}|{#2}\rangle}
% \col{a}{b}: a 2-element column vector in parentheses.  pmatrix (amsmath)
% replaces the old \left(\begin{array}{c}...\end{array}\right) construction.
\newcommand\col[2]{\begin{pmatrix}#1\\#2\end{pmatrix}}
% \tcol{a}{b}: the same vector written inline as a transposed row.
\newcommand\tcol[2]{(#1, #2)^T}
% \setmyfont{size}: set font size (baselineskip equal to the size -- tight,
% but apparently intentional for slides).
% Fixed: the old body "{ \fontsize... }" leaked a spurious space before and
% after every use; the "%"-free braces are now snug.
\newcommand\setmyfont[1]{\fontsize{#1}{#1}\selectfont}
%\newcommand\setmyfont[1]{}
%\includeonly{Linear-String, Discrete-Linear, Discrete-Nonlinear}
%\includeonly{Discrete-Nonlinear}
%\includeonly{}
\begin{document}
\raggedright
\pagestyle{myfooters}
%\pagestyle{plain}
\thispagestyle{empty}
%Slide 1
\title{\begin{center}{\LARGE\bf Non-Ergodic Dynamics}\newline
(for Finance \& Economics)\newline
or\newline
{\LARGE\bf Multiplicative Random Walks}
\newline
%\title{\begin{center}{\LARGE\bf Non-Ergodic Dynamics}\newline
%and\newline
%{\LARGE\bf Multiplicative Random Walks}
%\newline
{\em \small Kinnaird Multidisciplinary Research Conference}
\end{center}
}
\author{
\begin{wrapfigure}{r}{0.35\textwidth}
\vspace{-2cm}
\begin{center}
{\fontsize{5}{5}\selectfont
http://csustan.csustan.edu/\~{}tom
}
%% \includegraphics[width=0.15\textwidth]{tom-carter-QR.png}
{\hyperref{http://csustan.csustan.edu/\~tom}{}{}
{ \includegraphics[width=0.2\textwidth]{tom-carter-QR-pic.png}
}
}
\end{center}
\end{wrapfigure}
Tom Carter \newline
\newline
Computer Science / Cognitive Studies\newline
CSU Stanislaus
%Computer Science\newline
%CSU Stanislaus\newline
\vfill
{\hyperref{http://csustan.csustan.edu/\~tom/Lecture-Notes/Non-Ergodic/Non-Ergodic.pdf}{}{}
{http://csustan.csustan.edu/\~{}tom/Lecture-Notes/Non-Ergodic/Non-Ergodic.pdf}}
}
%\date{Fall, 2011}
%\date{\today}
%\date{November 19, 2013}
\date{\today}
\maketitle
%Slide 2
%% \sectionhead{Our general topics:}
%% %%tth:\begin{itemize}
%% %%tth:\item
%% \tthdump{\hyperlink{Why differentiable manifolds}
%% {\ $\circledcirc$ Why differentiable manifolds\newline}}
%% %%tth:\makehyperlink{Why differentiable manifolds}
%% %%tth:\item
%% \tthdump{\hyperlink{References}
%% {$\circledcirc$ References\newline}}
%% %%tth:\makehyperlink{References}
%% %%tth:\end{itemize}
%%
%% (ex): exercises.
%%
%% \pagedone
\renewcommand\contentsname{Our general topics:\hypertarget{Contents}{}}
%% \newcommand\listfigurename{List of Figures}
%% \newcommand\listtablename{List of Tables}
\tableofcontents
\pagedone
\sectionhead{A Simple Example}
Consider the following gambling game. You are going to bet an amount of money $m$\ldots
\begin{enumerate}
\item You bet your money (say $m$ dollars).
\item A fair coin is fairly flipped \newline (i.e., $Pr(heads) = Pr(tails) = 0.5$)
\item If the coin comes up heads, you are paid back $m * 1.50$; \newline
if the coin comes up tails, you are paid back $m * 0.60$.
\end{enumerate}
Should you be willing to play this game?
\pagedone
Let's calculate the {\em expected value} of the game:
\begin{align*}
\left< game \right> & = Pr(heads) * m * 1.50 + Pr(tails) * m * 0.60 \\
& = 0.5 * m * 1.50 + 0.5 * m * 0.60 \\
& = m * 0.75 + m * 0.30 \\
& = m * 1.05
\end{align*}
Thus, by playing the game, you expect to increase the money you wagered
by $5\%$.
It seems that you should be happy to play this game (for any amount of money $m$).
But, let's ask a slightly different question: What is the probability that if you play the game once,
you will walk away from the game a winner?
Obviously, if you play the game exactly once, you have a $50/50$ chance of walking
away a winner (that is, if the coin comes up heads \ldots).
\pagedone
On the other hand, in order to actually realize the {\em expected value}, aren't you likely to
have to play the game many times?
Okay, suppose you play the game twice, with an initial bet of \$1.00, and you ``let it ride''
(in other words, whatever happens in the first coin toss, you bet the entire amount on the next
coin flip).
The value of the game is: $v(HH) = \$2.25$, $v(HT) = \$0.90$, $v(TH) = \$0.90$, $v(TT) = \$0.36$.
So you only have one chance in four of walking away a winner (that is, if the coin comes up heads
twice in a row \ldots).
Let's explore that some. We'll take this in a particular direction,
generalizing into an iterated version. We are going to play the game $n$ times.
\pagedone
But, we're going to do it in a specific way, so that {\em time} ({\em history}) matters (we'll come
back to this issue later). We'll set things up so
that the outcome of later parts of the iteration depend on the results of earlier parts.
The new game follows \ldots
\pagedone
\begin{enumerate}
\item Let $v(0) = \$1$.
\item Let $j = 0$.
\item Now, repeat $n$ times:
\begin{enumerate}
\item A fair coin is fairly flipped ($Pr(heads) = Pr(tails) = 0.5$)
\item If the coin comes up heads, $v(j + 1) = 1.5 * v(j)$.
\item if the coin comes up tails, $v(j + 1) = 0.6 * v(j)$.
\item Set $j = j + 1$.
\end{enumerate}
\item You walk away with $v(n)$ dollars.
\end{enumerate}
\pagedone
Is this a good game to play?
Let's look at this several different ways. First, let's see if we can calculate the
{\em expected value} of the game. Suppose that we play the game
for some value of $n$. There are $2^n$ possible results of the game, each with probability
$\left(\frac{1}{2}\right)^n$. The expected value is then:
\begin{align*}
\left< game(n)\right> & = \left(\frac{1}{2}\right)^n * \sum_{j = 0}^n \binom{n}{j} (0.6)^j (1.5)^{n - j} \\
& = \left(\frac{1}{2}\right)^n * \left(0.6 + 1.5\right)^n \\
& = \left(\frac{1}{2}\right)^n * \left(2.1\right)^n \\
& = \left(\frac{2.1}{2}\right)^n \\
& = \left(1.05\right)^n
\end{align*}
(as we expected :-)
\pagedone
So, for some specific examples:
\begin{align*}
\left< game(10)\right> & = 1.63 \\
\left< game(20)\right> & = 2.65 \\
\left< game(50)\right> & = 11.47 \\
\left< game(100)\right> & = 131.5 \\
\left< game(1000)\right> & = 1.55 * 10^{21}
\end{align*}
That means that if I offer you the chance to play $game(100)$, you should be willing to wager \$130
(the expected value of the game is greater than your wager) \ldots
But, let's ask a slightly different question. What is the most likely amount you will walk away with if you
play $game(n)$?
The probability distribution we are working with here is the {\em binomial distribution} with $p = \frac{1}{2}$.
\pagedone
The value of $game(n)$ if the coin comes up tails $j$ times (and heads the other $n - j$ times) is
$$V(game(n), j) = (0.6)^j * (1.5)^{n - j}.$$
For even $n$, the {\em mode} of the distribution happens at $j = \frac{n}{2}$. Hence, the most likely
amount you will walk away with will be
$$VML(game(n)) = (0.6)^{n / 2} * (1.5)^{n / 2} = (0.6 * 1.5)^{n / 2} = (0.9)^{n / 2}.$$
That says:
\begin{align*}
VML(game(10)) & = 0.35 \\
VML(game(20)) & = 0.12 \\
VML(game(50)) & = 0.005 \\
VML(game(100)) & = 0.00003 \\
VML(game(1000)) & = 1.75 * 10^{-46}
\end{align*}
Hmmm \ldots The expected value is going up exponentially, but your most likely return is declining exponentially,
and, in fact, faster than the expected value is increasing.
Are you still willing to play the game?
Let's ask another question. Suppose I offer to let you play the game for \$1.00.
What is the probability that you will walk away from the game having broken even or better?
First, we can look at the number of {\em tails} you can have and still break even. This will happen
when
$$1 = (0.6)^j * (1.5)^{n - j}$$
or (taking logs)
$$j * \log(0.6) + (n - j) * \log(1.5) = 0$$
and so
$$j * (\log(1.5) - \log(0.6)) = n * \log(1.5)$$
and thus
$$ j = n * \frac{\log(1.5)}{\log(1.5) - \log(0.6)}$$
This works out to $ j \approx 0.4425 * n$.
You will thus break even or better if $j \le 0.4425 * n$ (remember, $j$ is the number of tails
-- you can't afford to have too many of them -- they're bad \ldots).
The probability of this happening will be
$$prob(j \le 0.4425 * n) = \left(\frac{1}{2}\right)^n * \sum_{i = 0}^{0.4425 * n}\binom{n}{i}$$
So, how can we calculate that?
\pagedone
One estimate that we can use, coming from Hoeffding's inequality, is
$$prob(j \le k) \leq \exp\left(-2 * \frac{(\frac{n}{2}-k)^2}{n}\right)$$
Putting in $k = 0.4425 * n$, we have, for example, when $n = 1000$:
\begin{align*}
prob(j \le 442) & \le \exp\left(-2 * \frac{(500 - 442)^2}{1000}\right) \\
& = \exp\left(-2 * \frac{58^2}{1000}\right) \\
& = \exp\left(-2 * \frac{3364}{1000}\right) \\
& = \exp\left(-2 * 3.364\right) \\
& = \exp\left(-6.728\right) \\
& \approx 0.0012
\end{align*}
\pagedone
%% Let's extend that question. Suppose I offer to let you play the game for \$m.
%% What is the probability that you will walk away from the game having broken even or better?
%%
%% First, we can look at the number of {\em tails} you can have and still break even. This will happen
%% when
%% $$20 = (0.6)^j * (1.5)^{n - j}$$
%% or (taking logs)
%% $$j * \log(0.6) + (n - j) * \log(1.5) = \log(m)$$
%% or
%% $$j * (\log(1.5) - \log(0.6)) = n * \log(1.5) - \log(m)$$
%% or
%% $$ j = \frac{n * \log(1.5) - \log(m)}{\log(1.5) - \log(0.6)}$$
%%
%% This works out to
%% $$ j \approx 0.4425 * n - \frac{\log(m)}{\log(2.5)}$$
%%
%% You will thus break even or better if
%% $$ j \leq 0.4425 * n - \frac{\log(m)}{\log(2.5)}$$
%% (remember, $j$ is the number of tails
%% -- you can't afford to have too many of them -- they're bad \ldots).
%% The probability of this happening will be
%% $$prob(j \le 0.4425 * n - \frac{\log(m)}{\log(2.5)})
%% = \left(\frac{1}{2}\right)^n * \sum_{i = 0}^{0.4425 * n - \frac{\log(m)}{\log(2.5)}}{n \choose i}$$
%%\pagedone
%% We can again use the estimate
%% $$prob(j \le k) \leq \exp\left(-2 \frac{(\frac{n}{2}-k)^2}{n}\right)$$
%% Putting in $k = 0.4425 * n - \frac{\log(m)}{\log(2.5)}$, we have, for several values of $m$
%% (keeping $n = 1000$):
%% \begin{table}[ht]
%% %% \caption{Nonlinear Model Results}
%% \centering
%% \begin{tabular}{c c c}
%% %%\hline\hline
%% m & $\frac{\log(m)}{\log(2.5)}$ & Prob. not losing \\ [0.5ex] % inserts table %heading
%% \hline
%% 1 & 0 & 0.0012 \\
%% 10 & 3 & 0.0006 \\
%% 100 & 5 & 0.0004 \\
%% 1000 & 8 & 0.0002 \\ [1ex]
%% \hline
%% \end{tabular}
%% \label{table:nonlin}
%% \end{table}
\pagedone
Just for fun, here is another estimation process derived from
Stirling's approximation for the factorial for $\binom{n}{i} = \frac{n!}{i!(n-i)!}$.
As noted, we have
$$prob(j \le \lambda * n) = \left(\frac{1}{2}\right)^n * \sum_{i = 0}^{\lambda * n}\binom{n}{i} $$
We can make an estimate of this as follows, for $\lambda < \frac{1}{2}$:
$$ \left(\frac{1}{2}\right)^n * \sum_{i = 0}^{\lambda * n}\binom{n}{i}
\le 2^{-n} * 2^{nH_2(\lambda)} = 2^{n(H_2(\lambda) - 1)}$$
where $H_2(\lambda) = \lambda * \log_2( \frac{1}{\lambda} ) + (1 - \lambda) * \log_2( \frac{1}{1 - \lambda} ).$
Using $\lambda = 0.4425$, we have
$$H_2(0.4425) - 1 = 0.99044 - 1 = -0.00956$$
This gives us the estimate (essentially the same as above):
$$prob(j \le 0.4425 * 1000) \le 2^{-9.56} = 0.00132.$$
This means that you only have about 1 chance in 1000 of walking away a winner.
Now do you want to play the game?
To get a sense of what happens, let's look at examples of the results of playing
the ``1000 steps'' game. Following are some examples. In the first example, one player plays the
``1000 steps'' game. Then we can see an example where we look at the mean
results of 500 players each playing the ``1000 steps'' game, and finally
2000 players each playing a ``10,000 steps'' game. These plots are
from a NetLogo model (that you can use to explore these issues \ldots see link in the references).
The graphs are log-linear -- thus helping to see ``exponential change''
over time.
\pagedone
\centerline{\includegraphics[width = 7in]{Images/1-for-1000-steps}}
Playing the 1.5 / 0.6 game for 1000 steps (NetLogo model)
\pagedone
\centerline{\includegraphics[width = 7in]{Images/500-for-1000-steps}}
500 players playing the 1.5 / 0.6 game for 1000 steps (NetLogo model)
\pagedone
\centerline{\includegraphics[width = 7in]{Images/2000-for-10000-steps}}
2000 players playing the 1.5 / 0.6 game for 10000 steps (NetLogo model)
\pagedone
We can see that despite the fact that the expected value of the game
at each step is $1.05$ (which is $ > 1$), nonetheless, this is not a good game to play
many times over (assuming that you ``let it ride'' each time).
{\hyperref{http://www.youtube.com/embed/8Xjr2hnOHiM?rel=0\&start=100\&end=177\&autoplay=0}{}{}
{Feeling lucky?}
% {http://www.youtube.com/embed/8Xjr2hnOHiM?rel=0\&start=100\&end=177\&autoplay=0}
}
All right -- let's play a slightly different game. The general setup is
the same, except instead of multiplying your wealth at each step
by either 1.5 (on heads) or 0.6 (on tails), we'll draw your ``multiplier''
from a normal distribution with mean $\mu$ and variance $\sigma^2$.
In other words, we'll play the game with
$$v(j+1) = v(j) * X_j$$
where $X_j \in Normal(\mu, \sigma^2)$, and you "let it ride" $n$ times.
\pagedone
Actually, I'll do a little better than that \ldots I'll put a ``floor'' on the
multiplier $X$:
if $X < 0.001$, I'll replace $X$ by $0.001$ before I multiply. Note
that this will keep your ``winnings'' from ever going negative \ldots
Should you be willing to play this game for, say, $n = 1000$ with
$\mu = 1.05$ and $\sigma^2 = 0.15$?
I'll let you do some ``estimated value'' calculations.
But, following are some reasonably typical runs (in NetLogo) of
this, for 100, 500, 1000, and 10000 step games. In each case, we are
averaging 300 players' results. Again, these are from NetLogo,
and are log-linear plots.
\pagedone
\centerline{\includegraphics[width = 7in]{Images/100-steps-300-normal-105-015}}
100 step game, with $X \in Normal(1.05, 0.15)$, averaging 300 players.
\pagedone
\centerline{\includegraphics[width = 7in]{Images/500-steps-300-normal-105-015}}
500 step game, with $X \in Normal(1.05, 0.15)$, averaging 300 players.
\pagedone
\centerline{\includegraphics[width = 7in]{Images/1000-steps-300-normal-105-015}}
1,000 step game, with $X \in Normal(1.05, 0.15)$, averaging 300 players.
\pagedone
\centerline{\includegraphics[width = 7in]{Images/10000-steps-300-normal-105-015}}
10,000 step game, with $X \in Normal(1.05, 0.15)$, averaging 300 players.
\pagedone
For 100 steps, things look pretty good -- the return seems to be growing exponentially
(remember, the graphs are log-linear, so a straight line is exponential growth
-- or decay \ldots).
But, at 500 steps, things are starting to flop around. By the time we get to
1,000 steps, things are looking pretty bad. And by 10,000 steps, we are
clearly declining exponentially.
\pagedone
\sectionhead{Ergodicity}
So, what does this have to do with {\em ergodicity}?
In general terms, a dynamical system is called {\em ergodic} if the average over time of the system is
equal to the ``space'' (ensemble) average of the system. In other words, if a system is {\em ergodic}, we
can follow the trajectory of a single example (realization) of the system in order to explore the whole
space of possible behaviors of the system.
If a system is ergodic, we should be able to learn about the dynamics either by theoretical
analysis of the ensemble behavior of the system, or by tracking a relatively small number
of individual trajectories. In the examples we have been checking, trying to predict the long
term results (trajectories) by calculating {\em estimated values} in effect assumes that the
systems are ergodic -- in other words, that an individual trajectory will explore enough of the
space of possibilities to approximate the expected ensemble value.
What we have seen is that in our examples, {\em expected values} do not do a good job
of predicting individual trajectories. In fact, even following ``many'' (hundreds?) of trajectories
does not reflect the ensemble average expected value.
In the heads/tails (1.5 / 0.6) multiple steps games, larger payoffs are skewed to smaller
numbers of tails. In order to
be a winner, you need to have enough more heads than tails (i.e., something less than 45\%
tails). You do have a chance of being a large winner (e.g., if you got 1,000 heads in a row
in the ``1,000 steps'' game, you would win something like \$$10^{176}$ -- but of course the
probability of that happening would be $2^{-1000} \approx 10^{-301}$).
\pagedone
An important issue is the role of {\em time} ({\em history}) in the dynamics. For example,
if you are playing the 1,000 step game, and in the first 100 steps you have seen 50 tails,
there is no chance that later in the game your trajectory will explore portions of the
space having fewer than 50 tails. And, in general, each time you see a
tails, more of the ``good'' part of the space of possibilities is cut off from future
exploration. Thus, the history of the trajectory matters, and the system is not
{\em ergodic}. This means that the {\em ensemble average} (i.e., the
{\em expected value}) is not a good estimator of the results of actually playing
the ``1,000 steps'' game.
On the next few pages are some plots that show the relationship of the binomial
distribution to the ``expected value'' concentrations. To see things better, the
concentration of ``expected value'' is ``normalized''.
\pagedone
\centerline{\includegraphics[width = 7in]{Images/20-binomial-and-expected}}
Binomial distribution ($p = \frac{1}{2}$, $n = 20$)(blue) and \newline
``normalized concentration of expected value'' ($1.5, 0.6$) (green)
\pagedone
\centerline{\includegraphics[width = 7in]{Images/100-binomial-and-expected}}
Binomial distribution ($p = \frac{1}{2}$, $n = 100$)(blue) and \newline
``normalized concentration of expected value'' ($1.5, 0.6$) (green)
\pagedone
\centerline{\includegraphics[width = 7in]{Images/1000-binomial-and-expected}}
Binomial distribution ($p = \frac{1}{2}$, $n = 1000$)(blue) and \newline
``normalized concentration of expected value'' ($1.5, 0.6$) (green)
\pagedone
\centerline{\includegraphics[width = 7in]{Images/500-players-number-of-losses-in-1000}}
histogram of number of tails (losses) for 500 players, each playing the ``1000 step'' game
(NetLogo model)
\pagedone
We can see that for relatively ``small'' values of $n$, the $n$-steps game will be in a
realm where there is reasonable ``overlap'' between the central tendency of
the binomial distribution and the ``concentration of expected value.'' But, as $n$
gets larger, there is separation between the two. Thus, at the beginning, the
``expected value'' will be a reasonable estimate of the return from the game.
But, at some point, the non-ergodicity will take over, and the history of the
trajectory will win out.
By averaging the trajectories of many ``players'', we can increase the likelihood
that at least some players will (for a while, \ldots) explore the more favorable
regions of the space of possibilities. But, eventually, the {\em law of large
numbers} will take over, and with overwhelming probability, things will go
badly.
\pagedone
\sectionhead{More General Analysis}
One issue that we might consider is the effect of the precise values (1.5 and 0.6)
that we have chosen to explore. In fact, those values were chosen with some
malice aforethought, to make the demonstration / pedagogy more effective.
In particular, the values were chosen so that (at least), the {\em expected value}
was greater than 1 ($0.5 * 1.5 + 0.5 * 0.6 = 1.05$), but the product of the two
was less than 1 ($1.5 * 0.6 = 0.9$).
The ``normal distribution multiplier'' game is somewhat more general, and
thus perhaps amenable to some more careful analysis.
I will also note that mathematical history, and the propensity of researchers,
are such that in fact, we are likely to find better tools for analysis if we move
to a continuous version of the system, rather than a discrete, step-by-step
version. We'll come back to this issue later.
So, for a while, let's explore the $game(\mu, \sigma^2, n, s)$ system where
$\mu$ and $\sigma^2$ are the mean and variance of the normal distribution
from which the multipliers are taken, $n$ is the number of steps in the game,
and $s$ is the number of different trajectories we will follow
(typically, we will average over the $s$ trajectories \ldots).
The ``value'' of the game is thus
\[ V(\mathit{game}(\mu, \sigma^2, n, s)) = \frac{1}{s}\sum_{i=1}^s\left(\prod_{j=1}^n X_j\right) \]
where $X_j \sim \mathcal{N}(\mu, \sigma^2)$ are independent, identically distributed random
variables.
\pagedone
We are interested in exploring the interactions among the various parameters.
Following are several examples. In particular, they are examples of
\[ \mathit{game}(\mu, \sigma^2, n, s) \]
with $\mu = 1.05$, $n = 20,000$, $s = 2000$, and $\sigma^2 = 0.12, 0.11, 0.10,$
$0.09, 0.08$.
Thus, we are doing a parameter sweep on $\sigma^2$, but keeping the other parameters
constant. There are some interesting differences for the various values of $\sigma^2$.
\pagedone
\centerline{\includegraphics[width = 7in]{Images/20000-steps-2000-normal-105-012}}
2000 players playing the ``normal'' $\mu=1.05, \sigma^2=0.12$ for 20,000 steps game
(NetLogo model)
\pagedone
\centerline{\includegraphics[width = 7in]{Images/20000-steps-2000-normal-105-011}}
2000 players playing the ``normal'' $\mu=1.05, \sigma^2=0.11$ for 20,000 steps game
(NetLogo model)
\pagedone
\centerline{\includegraphics[width = 7in]{Images/20000-steps-2000-normal-105-010}}
2000 players playing the ``normal'' $\mu=1.05, \sigma^2=0.10$ for 20,000 steps game
(NetLogo model)
\pagedone
\centerline{\includegraphics[width = 7in]{Images/20000-steps-2000-normal-105-009}}
2000 players playing the ``normal'' $\mu=1.05, \sigma^2=0.09$ for 20,000 steps game
(NetLogo model)
\pagedone
\centerline{\includegraphics[width = 7in]{Images/20000-steps-2000-normal-105-008}}
2000 players playing the ``normal'' $\mu=1.05, \sigma^2=0.08$ for 20,000 steps game
(NetLogo model)
%\pagedone
% \centerline{\includegraphics[width = 7in]{Images/200000-steps-2000-normal-105-009}}
% 2000 players playing the "normal" $\mu=1.05, \sigma^2=0.09$ for 200000 steps game
% (NetLogo model)
\pagedone
We won't go into detailed analysis here (see references, below). But, what we find is that
the time average of the system is (largely) controlled by
$$\mu - \frac{\sigma^2}{2}.$$
We are mostly interested in $\mu$ greater than 1.
When $\mu - \frac{\sigma^2}{2} > 1$, the system (largely) grows exponentially,
more or less following the {\em expected value}, or {\em ensemble average} of the system.
If $\mu - \frac{\sigma^2}{2} = 1$, the long term system average is roughly constant.
If $\mu - \frac{\sigma^2}{2} < 1$, the system (generally) decays exponentially.
\pagedone
\sectionhead{More Multiplicative Random Walks}
Let's look at this from the perspective of multiplicative random walks. We have, for an
individual trajectory of the system
\[ v(j+1) = v(j) \cdot X_j \]
where $X_j$ is a random variable. Thus, for a game of $n$ steps, and assuming $v(0) = 1$,
we have
\[ v(n) = \prod_{j = 1}^n X_j \]
In the cases we have been exploring, we are
assuming the $X_j$ are independent, identically distributed.
What we have, then, is that the $v(n)$ are also random variables. Let's see what we can
learn about the distribution of $v(n)$.
\pagedone
One thing we can do is take logs:
\begin{align*}
\ln(v(n)) &= \ln\left(\prod_{j = 1}^n X_j\right) \\
&= \sum_{j = 1}^n \ln(X_j)
\end{align*}
Now, we can see that $\ln(X_j)$ are also i.i.d.\ random variables. If we assume that
the variance of $\ln(X_j)$ is finite, then a {\em Central Limit Theorem} will tell us that
\[ \frac{1}{n}\ln(v(n)) = \frac{1}{n}\sum_{j = 1}^n \ln(X_j) \]
tends to a normal distribution as $n$ grows.
This indicates that we should think in terms of a {\em log-normal} distribution \ldots
\pagedone
The probability density function for a lognormal distribution is:
$$ f_X(x;\mu,\sigma) = \frac{1}{x \sigma \sqrt{2 \pi}}\, e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}},$$
with $x > 0$.
This gives us some ways to think about the system.
Another kind of approach is to move in the direction of continuous versions of the system, in
which case we can expect to work with a {\em stochastic differential equation} such as
\[ dx = x(\mu\, dt + \sigma\, dW) \]
where we have a {\em drift} term $\mu$, $\sigma$ gives the amplitude of the noise, and
\[ W(t) = \int_0^t dW \]
is a {\em Wiener process}.
\pagedone
The {\em Wiener process} is, in general, a continuous-time form of Brownian motion.
Things can be somewhat tricky when trying to solve (integrate) stochastic differential equations.
In particular, we have the ``noise'' term $dW$, and we need to be careful how we understand
or interpret or model this term. In general, $W(t)$ is considered to be a continuous but
nowhere differentiable function, so we must be thoughtful when doing integration. There
are various approaches to such integration -- an important approach is often called the
{\em It\= o calculus}, after Kiyoshi It\= o.
Much more discussion of these topics can be found in the references.
\pagedone
\sectionhead{Some Implications}
These issues of non-ergodicity have some interesting implications.
The first observation is that we should be careful in drawing conclusions about
{\em expected values} when the system under consideration may not be
ergodic. If it would take extremely large numbers of trajectories to do a reasonable
job of exploring the space of possibilities, we will need to find another way of
figuring out the typical {\em time average} of the system.
This also means that building models of economic or financial systems is
likely to require thoughtful analysis of any non-ergodic aspects of the system.
Another observation is that these issues have implications for developing
investment portfolios. As we have seen, an investment opportunity with
a given mean return ($\mu$) may or may not actually be a good investment,
depending on the variance of the system. A system with large variance may
not be just riskier, but in fact a bad investment.
This also has consequences for decisions about optimal leverage. If an investor
does not understand the non-ergodic effects of large variance, they
may commit excessive leveraged resources to bad investments.
Of course, this is not a simple issue. For example, the standard deviation of an
investment opportunity (often called the {\em volatility}) is likely
not to be stationary over time:
\pagedone
\centerline{\includegraphics[width = 7in]{Images/800px-VIX}}
CBOE Volatility Index (VIX) from December 1985 to May 2012 (daily closings),
data from Chicago Board Options Exchange
\pagedone
\centerline{\includegraphics[width = 7in]{Images/Vix-S-and-P-500-1990-2013}}
CBOE Volatility Index (VIX) from 1990 to 2013 (daily closings),
with S\&P 500 \newline
data from Chicago Board Options Exchange
\pagedone
In practice, we will be trying to estimate parameters from sampled data, and
developing models of various financial instruments. What we have seen
here reminds us that we need to understand in some detail both the
mean and variance of the instruments.
In addition, a strong determinant of the ensemble average is the extreme
values. We need to consider the likelihood of observing those extreme
values, and beware of implicitly believing that extreme values are
adequately typical to be representative in meaningful ways \ldots
\pagedone
\centerline{\includegraphics[width = 7in]{Images/MLK-averages}}
\begin{center}Averages \ldots\ cartoon by Joel Pett
{\hyperref{http://www.kentucky.com/joel-pett/}{}{}
{http://www.kentucky.com/joel-pett/}
}
\end{center}
\pagedone
The next step, of course, is to extend to both additive and multiplicative random walks,
such as the {\em Kesten Processes}:
\[ V(n + 1) = V(n) \cdot X(n) + Y(n) \]
where both $X(n)$ and $Y(n)$ are random variables. These processes can
lead to {\em power law} distributions.
And so it goes \ldots
{\hyperref{http://www.youtube.com/embed/8Xjr2hnOHiM?rel=0\&start=100\&end=177\&autoplay=0}{}{}
{Feeling lucky?}
% {http://www.youtube.com/embed/8Xjr2hnOHiM?rel=0\&start=100\&end=177\&autoplay=0}
}
NOTE: Many thanks to Ole Peters for his various talks at the Santa Fe Institute's
Complex Systems Summer School, and for conversations with participants in the
Complex Systems Summer Schools over the years \ldots
{\hyperref{http://tuvalu.santafe.edu/events/workshops/index.php/Complex\_Systems\_Summer\_School\_2013}{}{}
{2013 SFI Complex Systems Summer School Wiki}
}
\pagedone
\footnotesize
\bibliographystyle{plain}
\marginpar{\hyperlink{Contents}{\hfil$\leftarrow$}}
\vspace{-\baselineskip}
\vspace{-\baselineskip}
\vspace{-\baselineskip}
\begin{thebibliography}{12}
\addcontentsline{toc}{section}{References}
\bibitem{netlogo}
Carter, Tom\newline
NetLogo model: NonErgodic.nlogo \newline
{\hyperref{http://csustan.csustan.edu/~tom/SFI-CSSS/Models/NetLogo/NonErgodic/NonErgodic.html}{}{}
{http://csustan.csustan.edu/\~\ tom/SFI-CSSS/Models/NetLogo/NonErgodic/NonErgodic.html}
}
\bibitem{gabaix}
Gabaix, Xavier, \newline
{\em Zipf's Law for Cities: An Explanation}\newline
The Quarterly Journal of Economics (1999) 114 (3): 739-767.
{\hyperref{http://qje.oxfordjournals.org/content/114/3/739.short}{}{}
{http://qje.oxfordjournals.org/content/114/3/739.short}
}
\bibitem{gray}
Gray, Robert M., \newline
{\em Probability, Random Processes, and Ergodic Properties}\newline
{\hyperref{http://www-ee.stanford.edu/~gray/arp.html}{}{}
{http://www-ee.stanford.edu/\~\ gray/arp.html}
}
\bibitem{hamming}
Hamming, Richard W.\newline
{\em Coding and Information Theory}, 2nd Ed.,\newline
Prentice Hall, New Jersey, 1986.
\bibitem{horst}
Horst, Ulrich, \newline
{\em The Stochastic Equation $Y_{t+1} = A_tY_t + B_t$ with Non-Stationary Coefficients}, March 30, 2001\newline
{\hyperref{http://horst.qfl-berlin.de/sites/files/u2/Stochastic-eqn.pdf}{}{}
{http://horst.qfl-berlin.de/sites/files/u2/Stochastic-eqn.pdf}
}
\bibitem{lau}
Lau, A. W. C., and Lubensky, T. C.\newline
{\em State-dependent diffusion: Thermodynamic consistency and its path integral formulation}\newline
Arxiv.org, 15 Jul 2007,\newline
{\hyperref{http://arxiv.org/abs/0707.2234}{}{}
{http://arxiv.org/abs/0707.2234}
}
\bibitem{mitzenmacher}
Mitzenmacher, Michael, \newline
{\em A Brief History of Generative Models for
Power Law and Lognormal Distributions}\newline
Internet Mathematics Vol. 1, No. 2: 226-251
{\hyperref{http://www.eecs.harvard.edu/~michaelm/postscripts/im2004a.pdf}{}{}
{http://www.eecs.harvard.edu/\~\ michaelm/postscripts/im2004a.pdf}
}
\bibitem{navarro-barrientos}
Navarro-Barrientos, Jesus E., Cantero-Alverez, Ruben, Rodrigues, Joao, and Schweitzer, Frank \newline
{\em Investments in random environments}\newline
{\hyperref{http://www.researchgate.net/publication/1766408\_Investments\_in\_Random\_Environments/file/32bfe50d1a002eb4cf.pdf}{}{}
{http://www.researchgate.net/publication/1766408
\_Investments\_in\_Random\_Environments/file/32bfe50d1a002eb4cf.pdf}
}
\bibitem{nolan1}
Nolan, John, \newline
{\em Stable Distributions Page}\newline
{\hyperref{http://academic2.american.edu/~jpnolan/stable/stable.html}{}{}
{http://academic2.american.edu/\~\ jpnolan/stable/stable.html}
}
\bibitem{nolan2}
Nolan, John, \newline
{\em Fitting Data and Assessing Goodness-of-Fit with Stable Distributions}\newline
{\hyperref{http://academic2.american.edu/~jpnolan/stable/DataAnalysis.pdf}{}{}
{http://academic2.american.edu/\~\ jpnolan/stable/DataAnalysis.pdf}
}
\bibitem{peters1}
Peters, Ole, \newline
{\em Optimal Leverage From Non-ergodicity}\newline
Arxiv.org, 9 Aug 2010,\newline
{\hyperref{http://arxiv.org/abs/0902.2965}{}{}
{http://arxiv.org/abs/0902.2965}
}
\bibitem{peters2}
Peters, Ole, and Klein, William\newline
{\em Ergodicity breaking in geometric Brownian motion}\newline
Arxiv.org, 4 Mar 2013,\newline
{\hyperref{http://arxiv.org/abs/1209.4517}{}{}
{http://arxiv.org/abs/1209.4517}
}
\bibitem{peters3}
Peters, Ole\newline
{\em The Time Resolution of the St Petersburg Paradox}\newline
Phil. Trans. R. Soc. A 13 December 2011 vol. 369 no. 1956 4913-4931\newline
{\hyperref{http://rsta.royalsocietypublishing.org/content/369/1956/4913.abstract}{}{}
{http://rsta.royalsocietypublishing.org/content/369/1956/4913.abstract}
}
\bibitem{peters4}
Peters, Ole\newline
{\em On Time and Risk}\newline
Santa Fe Institute Bulletin, 2009, p.36-41\newline
{\hyperref{http://tuvalu.santafe.edu/~ole/Peters2009.pdf}{}{}
{http://tuvalu.santafe.edu/\~\ ole/Peters2009.pdf}
}
\bibitem{peters5}
Peters, Ole, \newline
{\em Menger 1934 Revisited}\newline
arXiv:1110.1578 [q-fin.RM], 7 Oct 2011\newline
{\hyperref{http://arxiv.org/abs/1110.1578}{}{}
{http://arxiv.org/abs/1110.1578}
}
\bibitem{redner}
Redner, S.\newline
{\em Random Multiplicative Processes: An Elementary Tutorial}\newline
Am. J. Physics, 58(3), March 1990\newline
{\hyperref{http://www.researchgate.net/publication/2894393\_Random\_Multiplicative\_Processes\_An\_Elementary\_Tutorial/file/3deec519b7cb07a469.pdf}{}{}
{http://www.researchgate.net/publication/2894393
\_Random\_Multiplicative\_Processes\_An\_Elementary
\_Tutorial/file/3deec519b7cb07a469.pdf}
}
\bibitem{richardson}
Richardson, L.\newline
{\em Some Measurements of Atmospheric Turbulence}\newline
Philosophical Transactions of the Royal Society of London. Series A,
Containing Papers of a Math. or Phys. Character (1896-1934). 1921-01-01. 221:1-28\newline
{\hyperref{http://archive.org/details/philtrans03439141}{}{}
{http://archive.org/details/philtrans03439141}
}
\bibitem{sornette1}
Sornette, Didier, and Cont, Rama\newline
{\em Convergent multiplicative processes repelled from zero: power laws and truncated power laws}\newline
arXiv:cond-mat/9609074, 7 Sep 1996\newline
{\hyperref{http://arxiv.org/abs/cond-mat/9609074}{}{}
{http://arxiv.org/abs/cond-mat/9609074}
}
\bibitem{sornette2}
Sornette, Didier,\newline
{\em Multiplicative processes and power laws}\newline
arXiv:cond-mat/9708231, 23 Feb 1998\newline
{\hyperref{http://arxiv.org/abs/cond-mat/9708231}{}{}
{http://arxiv.org/abs/cond-mat/9708231}
}
\pagedone
\bibitem{thurner}
Thurner, Stefan, and Hanel, Rudolf\newline
{\em The Entropy of Non-ergodic Complex Systems -- A Derivation from First Principles}\newline
International Journal of Modern Physics: Conference Series Vol. 16 (2012) 105-115,
World Scientific Publishing Company\newline
{\hyperref{http://www.worldscientific.com/doi/pdf/10.1142/S2010194512007817}{}{}
{http://www.worldscientific.com/doi/pdf/10.1142/S2010194512007817}
}
\end{thebibliography}
\hyperlink{Contents}{\hfil To top $\leftarrow$}
\end{document}