% For whatever reason I decided to type my MATH 2114, Introduction to Linear Algebra, notes in LaTeX.
\documentclass[12pt, letterpaper]{article}
\usepackage[margin=0.7in]{geometry}
\usepackage{amsmath, mathtools}
\usepackage{breqn}
\usepackage[makeroom]{cancel}
\usepackage{graphicx}
\usepackage[utf8]{inputenc}
\usepackage{ stmaryrd }
\usepackage{multicol}
\usepackage{tasks}
\usepackage{tabularx}
\usepackage{vwcol}
\usepackage{booktabs}
\usepackage{tabularray}
\usepackage{pgfplots}
\usepackage{fancyhdr}
\usepackage{xcolor}
\usepackage{soul}
\usepackage[none]{hyphenat}
\usepackage{amsfonts}
\usepackage{tkz-euclide}
\usepackage{esvect}
\usepackage{titlesec}
\usepackage{setspace}
\usepackage[many]{tcolorbox}
\newtcolorbox{definition}[1]{colback=red!5!white,colframe=red!75!black, fonttitle=\bfseries,title=#1, enhanced jigsaw, breakable}
\newtcolorbox{example}[1]{colback=blue!5!white,colframe=blue!75!black, fonttitle=\bfseries,title=#1, enhanced jigsaw, breakable}
\newtcolorbox{theorem}[1]{colback=yellow!5!white,colframe=yellow!75!black, fonttitle=\bfseries,title=#1}
\usepackage{hyperref}
\usepackage{nicematrix}
\hypersetup{
colorlinks,
citecolor=black,
filecolor=black,
linkcolor=black,
urlcolor=black
}
\usepgflibrary{plotmarks}
\usetikzlibrary{math, tikzmark, shapes.geometric, fit, positioning, graphs}
\usepackage{blkarray, bigstrut}
\usepackage{esvect, systeme,pifont, enumitem,amsthm,amssymb,gensymb}
\pgfplotsset{compat=newest}
\def\newline{\hfill \break}
\def\({\left(}
\def\){\right)}
\def\ddx{\frac{d}{dx}}
\def\[{\left[}
\def\]{\right]}
\def\ddy{\frac{dy}{dx}}
\newcommand\aug{\fboxsep=-\fboxrule\!\!\!\fbox{\strut}\!\!\!}
\DeclareMathOperator{\arcsec}{arcsec}
\DeclareMathOperator{\arccot}{arccot}
\DeclareMathOperator{\arccsc}{arccsc}
\setul{0.5ex}{0.2ex}
\setulcolor{black}
\newcommand{\xmark}{\text{\ding{55}}}
\renewcommand{\qedsymbol}{$\blacksquare$}
\DeclarePairedDelimiter\abs{\lvert}{\rvert}%
\DeclarePairedDelimiter\norm{\lVert}{\rVert}%
% Swap the definition of \abs* and \norm*, so that \abs
% and \norm resizes the size of the brackets, and the
% starred version does not.
\makeatletter
\let\oldabs\abs
\def\abs{\@ifstar{\oldabs}{\oldabs*}}
%
\let\oldnorm\norm
\def\norm{\@ifstar{\oldnorm}{\oldnorm*}}
\makeatother
\title{MATH 2114 Notes}
\author{Tyler Kruszewski}
\date{\today}
\begin{document}
\maketitle
\begin{flushleft}
\textcolor{red}{Disclaimer: These notes are in no way a replacement for your own, there may be typos, wrong information, difference in notation or a difference caused by a different instructor. You should always prioritize your own notes and at best use these as a review.}
\end{flushleft}
\tableofcontents
\newpage
\begin{flushleft}
\section{Section 1.1 The Geometry and Algebra of Vectors}
\subsection{Introduction}
\begin{center}
Scalars \hspace{10ex} Vectors \hspace{10ex} Matrices
\end{center}
Scalars are Real Numbers ($\mathbb{R}$)
\begin{definition}{Definition: Vectors}
An \textbf{$n$-dimensional column vector} $\vv{v}$ is an ordered list of $n$ components:
$\vv{v} = \begin{bsmallmatrix} v_1 \\ v_2 \\ \vdots \\ v_n \end{bsmallmatrix}$.
\end{definition}
The set of all real $n$-dimensional column vectors is $\mathbb{R}^n=\left\{\begin{bsmallmatrix} v_1 \\ v_2 \\ \vdots \\ v_n \end{bsmallmatrix}\Big| v_1, v_2, \hdots, v_n\in \mathbb{R} \right\}$ \\
With the $\Big|$ meaning ``such that'' and $\in$ meaning ``as an element of'' as well as $n$ meaning number of components and $\mathbb{R}$ being the type of components (real numbers)
\begin{example}{Example: Vectors In A Set Of Real Numbers}
\begin{itemize}
\item $\begin{bmatrix} 2 \\ -3 \end{bmatrix} \in \mathbb{R}^2$
\item $\begin{bmatrix} 1 \\ 0 \\ -3 \\ 8\end{bmatrix} \in \mathbb{R}^4$
\end{itemize}
\end{example}
\pagebreak
\subsection{Two Basic Vector Operations: Addition And Scalar Multiplication}
To add 2 vectors, they must have the same number of components.
\begin{example}{Example: Adding Two Vectors}
\begin{itemize}
\item $\begin{bmatrix}3 \\ -2 \\ 1 \\ 8 \end{bmatrix} + \begin{bmatrix} 1 \\ 1 \\ -1 \\ 3 \end{bmatrix} = \begin{bmatrix} 4 \\ -1 \\ 0 \\ 11 \end{bmatrix}$ \\
\item $\begin{bmatrix} 3 \\ 2 \\ 1 \end{bmatrix} + \begin{bmatrix} 1 \\ 8\end{bmatrix} = \text{undefined}$ \\
\item $2\begin{bmatrix} 1 \\ 4 \\ -2 \end{bmatrix} = \begin{bmatrix}2\(1\) \\ 2\(4\) \\ 2\(-2\) \end{bmatrix} = \begin{bmatrix} 2 \\ 8\\ -4\end{bmatrix}$
\end{itemize}
\end{example}
For vectors $\vv{u}, \vv{v} \in \mathbb{R}^n$ and scalars $c,d \in \mathbb{R}$ we say $c\vv{u}+d\vv{v}$ is a \textbf{linear combination} of $\vv{u}$ and $\vv{v}$.
\begin{example}{Example: Linear Combination}
$$2\begin{bmatrix}1\\1\\-1\\3\end{bmatrix}-1\begin{bmatrix}2\\1\\4\\1\end{bmatrix}=\begin{bmatrix}2\\2\\-2\\6\end{bmatrix}+\begin{bmatrix}-2\\-1\\-4\\-1\end{bmatrix}=\begin{bmatrix}0\\1\\-6\\5\end{bmatrix}$$
\begin{center}
$\begin{bsmallmatrix}0\\1\\-6\\5\end{bsmallmatrix}$ is a linear combination of $\begin{bsmallmatrix}1\\1\\-1\\3\end{bsmallmatrix}$ and $\begin{bsmallmatrix}2\\1\\4\\1\end{bsmallmatrix}$
\end{center}
\end{example}
\begin{definition}{Definition: Linear Combination}
Let $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\in\mathbb{R}^n$ then a \textbf{linear combination} of $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ is a vector $\vv{w}$ in $\mathbb{R}^n$ such that $\vv{w}=c_1\vv{v_1}+c_2\vv{v_2}+\hdots+c_k\vv{v_k}$ for some scalars $c_1,c_2,\hdots,c_k\in\mathbb{R}$.
\end{definition}
\begin{example}{Example: Is It A Linear Combination?}
\begin{itemize}
\item {Is $\begin{bsmallmatrix}1\\2\\3\end{bsmallmatrix}$ a linear combination of $\begin{bsmallmatrix}1\\1\\0\end{bsmallmatrix},\begin{bsmallmatrix}2\\-1\\0\end{bsmallmatrix},\begin{bsmallmatrix}1\\-1\\0\end{bsmallmatrix}$?
$$c_1\begin{bmatrix}1\\1\\0\end{bmatrix}+c_2\begin{bmatrix}2\\-1\\0\end{bmatrix}+c_3\begin{bmatrix}1\\-1\\0\end{bmatrix}=\begin{bmatrix}c_1\\c_1\\0\end{bmatrix} + \begin{bmatrix}2c_2\\-c_2\\0\end{bmatrix}+\begin{bmatrix}c_3\\-c_3\\0\end{bmatrix}\ne\begin{bmatrix}1\\2\\3\end{bmatrix}$$
\begin{center} Doesn't work as row 3 is all zeros and $0 + 0 + 0 \ne 3$ thus the answer is no.\end{center}}
\item {Is $\begin{bsmallmatrix}1\\2\\3\end{bsmallmatrix}$ a linear combination of $\begin{bsmallmatrix}1\\1\\1\end{bsmallmatrix},\begin{bsmallmatrix}1\\1\\0\end{bsmallmatrix},\begin{bsmallmatrix}-1\\1\\0\end{bsmallmatrix}$?
\begin{align*}
c_1\begin{bmatrix}1\\1\\1\end{bmatrix}+c_2\begin{bmatrix}1\\1\\0\end{bmatrix}+c_3\begin{bmatrix}-1\\1\\0\end{bmatrix}&=\begin{bmatrix}1\\2\\3\end{bmatrix} \\
\begin{bmatrix}c_1\\c_1\\c_1\end{bmatrix}+\begin{bmatrix}c_2\\c_2\\0\end{bmatrix}+\begin{bmatrix}-c_3\\c_3\\0\end{bmatrix}&=\begin{bmatrix}1\\2\\3\end{bmatrix}
\end{align*}
\begin{align*}
c_1+c_2-c_3&=1 & c_1+c_2+c_3 &= 2 & c_1 = 3 \\
3 + c_2 - c_3 &= 1 & 3+ c_2+c_3&=2 \\
c_2-c_3&=-2 & c_2+c_3 &=-1 \\
c_2 &= -2 + c_3 & -2 + c_3 + c_3 &=-1 \\
c_2 &=-2+\frac{1}{2} & 2c_3&=1 \\
c_2 &=-1.5 & c_3 &=\frac{1}{2}
\end{align*}
$$\begin{bmatrix}1\\2\\3\end{bmatrix}=3\begin{bmatrix}1\\1\\1\end{bmatrix}-\frac{3}{2}\begin{bmatrix}1\\1\\0\end{bmatrix}+\frac{1}{2}\begin{bmatrix}-1\\1\\0\end{bmatrix}\text{ thus yes}$$}
\end{itemize}
\end{example}
\begin{definition}{Definition: Zero Vector}
A \textbf{zero vector} $\vv{0}$ is a vector whose components are all zero.
\begin{example}{Example: Zero Vector}
\begin{itemize}
\item $\begin{bmatrix}0\\0\end{bmatrix}\in\mathbb{R}^2$
\item $\begin{bmatrix}0\\0\\0\\0\\0\end{bmatrix}\in\mathbb{R}^5$
\end{itemize}
\end{example}
\end{definition}
\pagebreak
\subsection{Algebraic Properties of Addition and Scalar Multiplication}
For $\vv{x}, \vv{y}, \vv{z} \in \mathbb{R}^n$ and $s, t \in \mathbb{R}$,
\begin{tasks}[label=\arabic*.,ref=\arabic*](2)
\task $\vv{x}+\vv{y}=\vv{y}+\vv{x}$ (Commutativity)
\task $\(\vv{x}+\vv{y}\)+\vv{z}=\vv{x}+\(\vv{y}+\vv{z}\)$ (Associativity)
\task $\vv{x}+\vv{0}=\vv{x}$
\task $\vv{x}+\(-\vv{x}\)=\vv{0}$
\task $\(s+t\)\vv{x}=s\vv{x}+t\vv{x}$ (Distributivity)
\task $s\(\vv{x}+\vv{y}\)=s\vv{x}+s\vv{y}$ (Distributivity)
\task $\(st\)\vv{x} = s\(t\vv{x}\)$
\task $1\vv{x} = \vv{x}$
\end{tasks}
\begin{proof}
Let $\vv{u}=\begin{bmatrix}u_1,u_2,\hdots,u_n\end{bmatrix},\vv{v}=\begin{bmatrix}v_1,v_2,\hdots,v_n\end{bmatrix}\text{, and }\vv{w}=\begin{bmatrix}w_1,w_2,\hdots,w_n\end{bmatrix}.$
\begin{itemize}
\item Commutativity
\begin{align*}
\vv{u}+\vv{v}&=\begin{bmatrix}u_1,u_2,\hdots,u_n\end{bmatrix}+\begin{bmatrix}v_1,v_2,\hdots,v_n\end{bmatrix}\\
&=\begin{bmatrix}u_1+v_1,u_2+v_2,\hdots,u_n+v_n\end{bmatrix} \text{ (Definition of Vector Addition)}\\
&=\begin{bmatrix}v_1+u_1,v_2+u_2,\hdots,v_n+u_n\end{bmatrix} \text{ (Commutativity of Addition of Real Numbers)}\\
&=\begin{bmatrix}v_1,v_2,\hdots,v_n\end{bmatrix}+\begin{bmatrix}u_1,u_2,\hdots,u_n\end{bmatrix} \text{ (Definition of Vector Addition)} \\
&=\vv{v}+\vv{u}
\end{align*}
\item Associativity
\begin{align*}
\(\vv{u}+\vv{v}\)+\vv{w}&=\(\begin{bmatrix}u_1,u_2,\hdots,u_n\end{bmatrix}+\begin{bmatrix}v_1,v_2,\hdots,v_n\end{bmatrix}\)+\begin{bmatrix}w_1,w_2,\hdots,w_n\end{bmatrix} \\
&=\begin{bmatrix}u_1+v_1,u_2+v_2,\hdots,u_n+v_n\end{bmatrix}+\begin{bmatrix}w_1,w_2,\hdots,w_n\end{bmatrix} \\
&=\begin{bmatrix}\(u_1+v_1\)+w_1,\(u_2+v_2\)+w_2,\hdots,\(u_n+v_n\)+w_n\end{bmatrix} \\
&=\begin{bmatrix}u_1+\(v_1+w_1\),\hdots,u_n+\(v_n+w_n\)\end{bmatrix} \text{ (Associativity of Addition of Real Numbers)} \\
&=\begin{bmatrix}u_1,u_2,\hdots,u_n\end{bmatrix}+\begin{bmatrix}v_1+w_1,v_2+w_2,\hdots,v_n+w_n\end{bmatrix} \\
&=\begin{bmatrix}u_1,u_2,\hdots,u_n\end{bmatrix}+\(\begin{bmatrix}v_1,v_2,\hdots,v_n\end{bmatrix}+\begin{bmatrix}w_1,w_2,\hdots,w_n\end{bmatrix}\) \\
&=\vv{u}+\(\vv{v}+\vv{w}\)
\end{align*}
\end{itemize}
\end{proof}
\begin{definition}{Definition: Vector Space}
$\mathbb{R}^n$ is called a \textbf{vector space} (a set of vectors along with addition and scalar multiplication that satisfy the above 8 properties).
\end{definition}
\pagebreak
\subsection{Geometric Representation of Vectors in $\mathbb{R}^2$ and $\mathbb{R}^3$}
\begin{definition}{Definition: Standard Position}
A vector is in \textbf{standard position} if its initial point is the origin.
\end{definition}
\begin{tblr}{width=\textwidth,
cells = {halign = c, valign = m}, colspec={X X}}
{$\mathbb{R}^2=\left\{\begin{bmatrix}x \\ y\end{bmatrix}\Big|x,y\in\mathbb{R}\right\}$} &
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2, ymax = 2,
xmin = -2, xmax = 2,
xticklabels = {100},
yticklabels = {100},
ticks = none,
axis line style = {<->, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\addplot[mark=*] coordinates {(0.5,-0.5)} node[right] {$A$} node[below] {$\(a_1, a_2\)$};
\addplot[mark=*] coordinates {(0.25,1)} node[below right] {$B$} node[above right] {$\(b_1, b_2\)$};
\draw[->, red, ultra thick] (0.5,-0.5) -- (0.25,1);
\addplot[mark=*] coordinates {(0,0)};
\addplot[mark=*] coordinates {(-0.25,1.5)} node[above left] {$\(b_1-a_1, b_2-a_2\)$};
\draw[->, red, ultra thick] (0,0) -- (-0.25, 1.5);
\end{axis}
\end{tikzpicture}}
$\begin{aligned}
\vv{v}&=\vv{AB}=\begin{bmatrix}b_1 -a_1\\b_2-a_2\end{bmatrix}\text{Compact Form} \\
&= \vv{\(0,0\)\(b_1-a_1,b_2-a_2\)}
\end{aligned}$}
\end{tblr}
\begin{center}
\begin{tikzpicture}[scale = 2]
\draw[dotted, blue, ultra thick, <->] (-1.5, -1.5) -- (1.5, 1.5);
\draw[->, ultra thick] (0,0) -- (0.5, 0.5) node[right] {$\vv{u}$};
\fill (0,0) circle[radius=1pt] node[left] {$\vv{0}$};
\draw[->, ultra thick] (2,0) -- (1.75, -0.25) node[above, pos = 0] {$-\frac{1}{2}\vv{u}$};
\fill (2, 0) circle[radius = 1pt];
\draw[->, ultra thick] (4,0) -- (5, 1) node[right] {$2\vv{u}$};
\fill (4, 0) circle[radius = 1pt];
\fill (6, 0) circle[radius = 1pt] node[right] {$0\vv{u} = \vv{0}$};
\end{tikzpicture} \\
\begin{flushleft}
All scalar multiples of $\vv{u}$ (in standard position) lie on the dotted line. This is due to all nonzero multiples of $\vv{u}$ being parallel to $\vv{u}$.\\
\end{flushleft}
\newline
\begin{tblr}{colspec = {X X X}, cells = {halign = c}, width = \textwidth}
{\begin{tikzpicture}[scale = 4]
\draw[->, ultra thick] (0,0) -- (0.86602540378443864676372317075294, 0.5) node[rotate = 30, above, pos = 0.5] {$\vv{u}$};
\fill (0,0) circle[radius = 1pt];
\end{tikzpicture}} &
{\begin{tikzpicture}[scale = 4]
\draw[->, ultra thick] (0,0) -- (0.5, -0.86602540378443864676372317075294) node[rotate = -60, above, pos = 0.5] {$\vv{v}$};
\fill (0,0) circle[radius = 1pt];
\end{tikzpicture}} &
{\begin{tikzpicture}[scale = 4]
\fill (1.3660254037844386467637231707529, -0.36602540378443864676372317075294) circle[radius = 1pt];
\draw[->, ultra thick] (0,0) -- (0.86602540378443864676372317075294, 0.5) node[rotate = 30, above, pos = 0.5] {$\vv{u}$};
\draw[->, ultra thick] (0.86602540378443864676372317075294, 0.5) -- (1.3660254037844386467637231707529, -0.36602540378443864676372317075294) node[rotate = -60, above, pos = 0.5] {$\vv{v}$};
\draw[->, ultra thick, red] (0, 0) -- (1.3660254037844386467637231707529, -0.36602540378443864676372317075294) node[rotate = -15, below, pos = 0.5] {$\vv{u} + \vv{v}$};
\fill (0,0) circle[radius = 1pt];
\end{tikzpicture}}
\end{tblr} \\
Head to Tail Rule \\
\newline
\begin{tikzpicture}[scale = 4]
\fill (1.3660254037844386467637231707529, -0.36602540378443864676372317075294) circle[radius = 1pt];
\draw[->, ultra thick] (0,0) -- (0.86602540378443864676372317075294, 0.5) node[rotate = 30, above, pos = 0.5] {$\vv{u}$};
\draw[->, ultra thick, dotted] (0.86602540378443864676372317075294, 0.5) -- (1.65469976687, -0.86602540378443864676372317075294);
\draw[->, ultra thick] (0,0) -- (0.5, -0.86602540378443864676372317075294) node[rotate = -60, below, pos = 0.5] {$\vv{v}$};
\draw[->, ultra thick, dotted] (0.5, -0.86602540378443864676372317075294) -- (1.75, -0.144337567297);
\draw[->, ultra thick, red] (0, 0) -- (1.3660254037844386467637231707529, -0.36602540378443864676372317075294) node[rotate = -15, below, pos = 0.5] {$\vv{u} + \vv{v}$};
\fill (0,0) circle[radius = 1pt];
\end{tikzpicture} \\
Parallelogram Rule
\end{center}
\begin{example}{Example: Is Every Vector In $\mathbb{R}^2$ A Linear Combination?}
Is every vector in $\mathbb{R}^2$ a linear combination of $\vv{a}=\begin{bsmallmatrix}1\\1\end{bsmallmatrix}$ and $\vv{b}=\begin{bsmallmatrix}2\\-1\end{bsmallmatrix}$? \\
$$\begin{bmatrix}x\\y\end{bmatrix}=c_1\begin{bmatrix}1\\1\end{bmatrix}+c_2\begin{bmatrix}2\\-1\end{bmatrix}$$
\begin{tblr}{width=\textwidth,
cells = {halign = c, valign = m}, colspec={X X}}
{$\begin{aligned}
\vv{a}-\vv{b} &= \vv{v} \\
\frac{5}{3}\vv{b}-\frac{3}{4}\vv{a}&=\vv{w}
\end{aligned}$} &
{\raisebox{-.5\height}{\begin{tikzpicture}[scale = 1.3]
\fill (1,1) circle[radius = 3pt];
\fill (-1,2) circle[radius = 3pt];
\draw[->, ultra thick] (0,0) -- (1,1) node[above, pos = 0.5, rotate = 45] {$\vv{a}$};
\draw[<->, ultra thick, dotted] (-3,-3) -- (2,2);
\draw[<->, ultra thick, dotted] (-2.5,1.25) -- (3.5,-1.75);
\draw[red, ->, ultra thick] (0, 0) -- (3.3333333333333333333333333333333, -1.6666666666666666666666666666667) node[above, pos = 0.8, rotate = 333.43494882292]{$\frac{5}{3}\vv{b}$};
\fill (2,-1) circle[radius = 3pt];
\draw[->, ultra thick] (0,0) -- (2,-1) node[above, pos = 0.5, rotate = 333.43494882292] {$\vv{b}$};
\draw[->, ultra thick, blue] (0,0) -- (-2,1) node[above, pos = 0.5, rotate = -26.56505117708] {$-\vv{b}$};
\draw[->, ultra thick, blue] (-2, 1) -- (-1, 2) node[pos = 0.5, rotate = 45, above] {$\vv{a}$};
\draw[->, ultra thick, red] (0,0) -- (-0.75,-0.75) node[above, pos = 0.5, rotate = 45] {$-\frac{3}{4}\vv{a}$};
\draw[->, ultra thick, purple] (0,0) -- (-1, 2) node[above, pos = 0.5, rotate = -63.43494882292] {$\vv{v}$};
\fill (2.5833333333333333333333333333333,-2.4166666666666666666666666666667) circle[radius = 3pt];
\draw[->, ultra thick, red] (3.3333333333333333333333333333333, -1.6666666666666666666666666666667) -- (2.5833333333333333333333333333333,-2.4166666666666666666666666666667) node[above, pos = 0.5, rotate = 45] {$-\frac{3}{4}\vv{a}$};
\draw[->, ultra thick, orange] (0,0) -- (2.5833333333333333333333333333333,-2.4166666666666666666666666666667) node[above, pos = 0.5, rotate = 316.909152433] {$\vv{w}$};
\fill (0,0) circle[radius = 3pt];
\end{tikzpicture}}}
\end{tblr}
\end{example}
\begin{definition}{Definition: Span $\mathbb{R}^n$}
We say the vectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ \textbf{span or generate $\mathbb{R}^n$} if every vector in $\mathbb{R}^n$ is a linear combination of $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$.
\begin{example}{Example: Vectors That Span $\mathbb{R}^2$}
$\begin{bmatrix}1 \\ 1\end{bmatrix}$ and $\begin{bmatrix}2\\-1\end{bmatrix}$ span $\mathbb{R}^2$.
\end{example}
\end{definition}
\begin{example}{Example: Do They Span $\mathbb{R}^2$}
Do the following vectors span $\mathbb{R}^2$?
\begin{tasks}[style = enumerate](2)
\task{$\begin{bmatrix} 1 \\2\end{bmatrix}$
\raisebox{-.5\height}{\begin{tikzpicture}[scale = 0.25]
\fill (0,0) circle[radius = 10pt];
\draw[->, black, ultra thick] (0,0) -- (1,2);
\draw[<->, black, ultra thick, dotted] (-2,-4) -- (2,4);
\end{tikzpicture}} no}
\task{$\begin{bmatrix} 1 \\2\end{bmatrix}$, $\begin{bmatrix}-2\\-4\end{bmatrix}$
\raisebox{-.5\height}{\begin{tikzpicture}[scale = 0.25]
\fill (0,0) circle[radius = 10pt];
\draw[->, black, ultra thick] (0,0) -- (1,2);
\draw[->, black, ultra thick] (0,0) -- (-2,-4);
\draw[<->, black, ultra thick, dotted] (-3,-6) -- (3,6);
\end{tikzpicture}} no}
\task{$\begin{bmatrix} 1 \\2\end{bmatrix}$, $\begin{bmatrix}-2\\-4\end{bmatrix}$, $\begin{bmatrix}3\\-3\end{bmatrix}$
\raisebox{-.5\height}{\begin{tikzpicture}[scale = 0.25]
\fill (0,0) circle[radius = 10pt];
\draw[->, black, ultra thick] (0,0) -- (1,2);
\draw[->, black, ultra thick] (0,0) -- (-2,-4);
\draw[<->, black, ultra thick, dotted] (-3,-6) -- (3,6);
\draw[<->, black, ultra thick, dotted] (-3,3) -- (4,-4);
\draw[->, black, ultra thick] (0,0) -- (3,-3);
\end{tikzpicture}} yes}
\task{$\begin{bmatrix} 1 \\2\end{bmatrix}$, $\begin{bmatrix}-2\\-4\end{bmatrix}$, $\begin{bmatrix}3\\-3\end{bmatrix}$, $\begin{bmatrix}1\\0\end{bmatrix}$
\raisebox{-.5\height}{\begin{tikzpicture}[scale = 0.25]
\fill (0,0) circle[radius = 10pt];
\draw[->, black, ultra thick] (0,0) -- (1,2);
\draw[->, black, ultra thick] (0,0) -- (-2,-4);
\draw[<->, dotted, black, ultra thick] (-2,0) -- (2,0);
\draw[->, black, ultra thick] (0,0) -- (1,0);
\draw[<->, black, ultra thick, dotted] (-3,-6) -- (3,6);
\draw[<->, black, ultra thick, dotted] (-3,3) -- (4,-4);
\draw[->, black, ultra thick] (0,0) -- (3,-3);
\end{tikzpicture}} yes}
\end{tasks}
To span $\mathbb{R}^2$, we need at least 2 nonzero vectors in $\mathbb{R}^2$ that are not parallel (not scalar multiples of each other).
\end{example}
\newpage
\section{Section 1.2 Dot Product}
\subsection{Introduction}
\begin{definition}{Definition: Dot Product}
The \textbf{dot product} of vectors $\vv{u},\vv{v}\in\mathbb{R}^n$ is a scalar in $\mathbb{R}$ and defined by:
$$\vv{u}\cdot\vv{v}=\begin{bmatrix}u_1\\u_2\\\vdots\\u_n\end{bmatrix}\cdot\begin{bmatrix}v_1\\v_2\\\vdots\\v_n\end{bmatrix}=u_1v_1+u_2v_2+\hdots+u_nv_n$$
\begin{example}{Example: Dot Product}
$\begin{bmatrix}2\\1\\-3\\4\end{bmatrix}\cdot\begin{bmatrix}1\\3\\1\\0\end{bmatrix}=2+3+\(-3\)+0=2$
\end{example}
Note: $\vv{u}\cdot\vv{v}\ne\vv{u}\vv{v}$ as $\vv{u}\vv{v}=\text{undefined}$
\end{definition}
\newpage
\subsection{Properties of Dot Product}
Let $\vv{u},\vv{v},\vv{w}\in\mathbb{R}^n$ and $c\in\mathbb{R}$. Then
\begin{enumerate}
\item$\vv{u}\cdot\vv{v}=\vv{v}\cdot\vv{u}$ (Commutativity)
\item$\vv{0}\cdot\vv{u}=0\(\text{for } \vv{0}\in\mathbb{R}^n\)$
\item$\vv{u}\cdot\(\vv{v}+\vv{w}\)=\vv{u}\cdot\vv{v}+\vv{u}\cdot\vv{w}$ (Distributivity)
\item$\(c\vv{u}\)\cdot\vv{v}=\vv{u}\cdot\(c\vv{v}\)=c\(\vv{u}\cdot\vv{v}\)$
\item$\vv{u}\cdot\vv{u}\ge0\text{ and }\vv{u}\cdot\vv{u}=0\text{ if and only if }\vv{u}=\vv{0}$
\end{enumerate}
\begin{proof} \newline
\begin{itemize}
\item Commutativity \\ \newline
Applying the definition of dot product to $\vv{u}\cdot\vv{v}$ and $\vv{v}\cdot\vv{u}$ we obtain
\begin{align*}
\vv{u}\cdot\vv{v}&=u_1v_1+u_2v_2+\hdots+u_nv_n \\
&=v_1u_1+v_2u_2+\hdots+v_nu_n \text{ (Multiplication of Real Numbers is Commutative)}\\
&=\vv{v}\cdot\vv{u}
\end{align*}
\item $\(c\vv{u}\)\cdot\vv{v}=c\(\vv{u}\cdot\vv{v}\)$ \\ \newline
Using the definitions of scalar multiplication and dot product, we have
\begin{align*}
\(c\vv{u}\)\cdot\vv{v}&=\begin{bmatrix}cu_1,cu_2,\hdots,cu_n\end{bmatrix}\cdot\begin{bmatrix}v_1,v_2,\hdots,v_n\end{bmatrix} \\
&=cu_1v_1+cu_2v_2+\hdots+cu_nv_n \\
&=c\(u_1v_1+u_2v_2+\hdots+u_nv_n\) \\
&=c\(\vv{u}\cdot\vv{v}\)
\end{align*}
\end{itemize}
\end{proof}
Question: Is $\overbrace{\(\vv{u}\cdot\vv{v}\)}^{\in\mathbb{R}}\cdot\vv{w}$ defined for vectors $\vv{u},\vv{v},\vv{w}\in\mathbb{R}^n$? No, as $\vv{u}\cdot\vv{v}$ returns a scalar which also means $\(\vv{u}\cdot\vv{v}\)\vv{w}$ is defined.
\begin{definition}{Definition: Length/Magnitude/Norm}
The \textbf{length/magnitude/norm} of a vector $\vv{u}\in\mathbb{R}^n$ is defined to be $\norm{\vv{u}}=\sqrt{\vv{u}\cdot\vv{u}}$ which also means $\vv{u}\cdot\vv{u}=\norm{\vv{u}}^2$.
$$\text{If }\vv{u}=\begin{bmatrix}u_1\\u_2\\\vdots\\u_n\end{bmatrix}\text{ then }\norm{\vv{u}}=\sqrt{u_1^2+u_2^2+\hdots+u_n^2}$$
Notes:
\begin{tasks}[label = $\star$]
\task $\norm{c\vv{u}}=\abs{c}\times\norm{\vv{u}}$
\task $\norm{\vv{u}+\vv{v}}\ne\norm{\vv{u}}+\norm{\vv{v}}$
\end{tasks}
\end{definition}
\begin{definition}{Definition: Unit Vector}
A \textbf{unit vector} is a vector whose magnitude is one.
\begin{example}{Example: Unit Vector}
$\begin{bmatrix}-1/3\\2/3\\2/3\end{bmatrix}$ thus $\norm{\begin{bmatrix}-1/3\\2/3\\2/3\end{bmatrix}}=\sqrt{\frac{1}{9}+\frac{4}{9}+\frac{4}{9}}=1$
\end{example}
\end{definition}
\begin{definition}{Definition: Normalizing}
For any nonzero vector $\vv{u}$, we can create a unit vector in the same direction as $\vv{u}$ by \textbf{normalizing} $\vv{u}$.
$$\underbrace{\frac{\vv{u}}{\norm{\vv{u}}}=\frac{1}{\norm{\vv{u}}}\vv{u}}_{\text{normalization of }\vv{u}}$$
\begin{example}{Example: Normalizing}
$\vv{u}=\begin{bmatrix}1\\-1\\2\\3\end{bmatrix}$
\begin{align*}
\norm{\vv{u}}&=\sqrt{1+1+4+9} =\sqrt{15} \\
\vv{v}&=\frac{1}{\sqrt{15}}\begin{bmatrix}1\\-1\\2\\3\end{bmatrix}
\end{align*}
\end{example}
\end{definition}
\begin{definition}{Definition: Distance}
The \textbf{distance} or $d\(\vv{x},\vv{y}\)$ between the vectors $\vv{x},\vv{y}\in\mathbb{R}^n$ is defined by
$$d\(\vv{x},\vv{y}\)=\norm{\vv{x}-\vv{y}}.$$
\begin{center}
\begin{tikzpicture}[scale = 3]
\fill (0.5,0.86602540378443864676372317075294) circle[radius = 1pt];
\fill (0.86602540378443864676372317075294, 0.5) circle[radius = 1pt];
\draw[ultra thick, black] (0.5,0.86602540378443864676372317075294) -- (0.86602540378443864676372317075294, 0.5) node [right, pos = 0.5] {$d\(\vv{x},\vv{y}\)$};
\draw[ultra thick, -stealth, red] (0,0) -- (0.5,0.86602540378443864676372317075294) node[above, pos = 0.5, rotate = 60] {$\vv{y}$};
\draw[ultra thick, -stealth, blue] (0,0) -- (0.86602540378443864676372317075294, 0.5) node[above, pos = 0.5, rotate = 30] {$\vv{x}$};
\fill (0,0) circle[radius = 1pt];
\end{tikzpicture}
\end{center}
\begin{example}{Example: Distance}
$\vv{x}=\begin{bmatrix}1\\3\\2\end{bmatrix}, \vv{y}=\begin{bmatrix}-2\\4\\1\end{bmatrix}$
$$d\(\vv{x},\vv{y}\)=\norm{\vv{x}-\vv{y}}=\norm{\begin{bmatrix}1-\(-2\)\\3-4\\2-1\end{bmatrix}}=\norm{\begin{bmatrix}3\\-1\\1\end{bmatrix}}=\sqrt{9+1+1}=\sqrt{11}$$
\end{example}
\end{definition}
\begin{definition}{Definition: Orthogonal}
Two vectors $\vv{u},\vv{v}\in\mathbb{R}^n$ are \textbf{orthogonal} if $\vv{u}\cdot\vv{v}=0$.
\begin{example}{Example: Orthogonal}
$$\begin{bmatrix}1\\3\\1\\-1\end{bmatrix}\cdot\begin{bmatrix}2\\1\\-4\\1\end{bmatrix}=2+3-4-1=0$$
\end{example}
Notes:
\begin{tasks}[label = $\star$]
\task Nonzero vectors in $\mathbb{R}^2$ or $\mathbb{R}^3$ that are orthogonal are perpendicular.
\task {$\vv{u}\cdot\vv{0}=0$ so $\vv{0}\in\mathbb{R}^n$ is orthogonal to every vector in $\mathbb{R}^n$.
\begin{tikzpicture}[scale = 2]
\fill (0,0) circle[radius = 1.5pt];
\draw[ultra thick, -stealth, black] (0,0) -- (1,0);
\end{tikzpicture}}
\task $\vv{0}$ is the only vector orthogonal to itself.
\end{tasks}
\end{definition}
\begin{example}{Example: Find Orthogonal Vectors}
Find a vector orthogonal to both $\begin{bmatrix}1\\-1\\2\end{bmatrix}$ and $\begin{bmatrix}2\\1\\1\end{bmatrix}$.\\
\newline
Some plug and chug solutions are $\begin{bmatrix}-1\\1\\1\end{bmatrix}$ and $\begin{bmatrix}1\\-1\\-1\end{bmatrix}$ but below is a general solution.
$$\begin{bmatrix}x\\y\\z\end{bmatrix}\cdot\begin{bmatrix}1\\-1\\2\end{bmatrix}=0\text{ and }\begin{bmatrix}x\\y\\z\end{bmatrix}\cdot\begin{bmatrix}2\\1\\1\end{bmatrix}=0$$
\begin{align*}
x-y+2z&=0 & 2x+y+z&=0 \\
x&=y-2z & 2\(y-2z\)+y+z&=0 \\
x&=y-2y & 2y-4z+y+z&=0 \\
x&=-y & 3y-3z&=0 \\
& & y&=z
\end{align*}
$$\begin{bmatrix}x\\y\\z\end{bmatrix}=\begin{bmatrix}-a\\a\\a\end{bmatrix}$$
\end{example}
\newpage
\section{Section 3.1/3.2 Matrices}
\subsection{Introduction}
\begin{definition}{Definition: Matrix}
An $m\times n$ \textbf{matrix} $A$ is a rectangular array of $mn$ entries:
$$A=\begin{bmatrix}a_{11}&a_{12}&a_{13}&\hdots&a_{1n}\\a_{21}&a_{22}&a_{23}&\hdots&a_{2n}\\\vdots&\vdots&\vdots&\hdots&\vdots\\a_{m1}&a_{m2}&a_{m3}&\hdots&a_{mn}\end{bmatrix}$$
Notes:
\begin{itemize}
\item $m\times n$ is the \textbf{size} of the matrix $A$ with $m$ being the number of rows and $n$ being the number of columns.
\item $a_{ij}$ is the $\(i,j\)$ entry of $A$ (the entry in the $i^{\text{th}}$ row and $j^{\text{th}}$ column of $A$)
\item $\vv{a_j}$ is the $j^{\text{th}}$ column of $A$
\item $\vv{A_i}$ is the $i^{\text{th}}$ row of $A$
\end{itemize}
\begin{example}{Example: Matrix}
$$B=\underbrace{\begin{bmatrix}3&-2&1\\4&8&9\end{bmatrix}}_{2\times3}$$
\begin{itemize}
\item $b_{2,1}=4$, $b_{12}=-2$
\item $\vv{b_2}=\begin{bmatrix}-2\\8\end{bmatrix}$
\item $\vv{B_2}=\begin{bmatrix}4&8&9\end{bmatrix}$
\end{itemize}
\end{example}
\end{definition}
\begin{definition}{Definition: Main Diagonal}
The \textbf{main diagonal entries} of a matrix $A$ are entries of the form $a_{ii}$. \\
\newline
The \textbf{main diagonal} of $A$ consists of all main diagonal entries of $A$.
\begin{example}{Example: Main Diagonal}
\begin{center}
$C=$\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=2pt]{A11}{-8} & 9 & 4 \\
2 & 1 & 0 \\
-1 & 2 & \tikzmarknode[inner sep=2pt]{A33}{9} \bigstrut[b] \\
\end{block}
\end{blockarray}
\end{center}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(A11.north)
(A33.north east) (A33.south) (A11.south west)};
\end{tikzpicture}
$C$ is a $3\times3$ matrix with the red circle indicating the main diagonal of $C$.
\end{example}
\end{definition}
\begin{definition}{Definition: Square Matrix}
A \textbf{square matrix} is a matrix with size $n\times n$.
\end{definition}
\begin{definition}{Definition: Zero Matrix}
A \textbf{zero matrix} (denoted $0$ or $0_{mn}$) is an $m\times n$ matrix whose entries are all zero.
\begin{example}{Example: Zero Matrix}
$$0_{4,2}=\begin{bmatrix}0&0\\0&0\\0&0\\0&0\end{bmatrix}$$
\end{example}
\end{definition}
\begin{definition}{Definition: Identity Matrix}
The \textbf{identity matrix} $I_n$ is the $n\times n$ matrix whose main diagonal entries are all $1$ and all other entries are $0$.
\begin{example}{Example: Identity Matrix}
\begin{itemize}
\item $I_2=\begin{bmatrix}1&0\\0&1\end{bmatrix}$
\item $I_4=\begin{bmatrix}1&0&0&0\\0&1&0&0\\0&0&1&0\\0&0&0&1\end{bmatrix}$
\end{itemize}
\end{example}
\end{definition}
\begin{definition}{Definition: Equal Matrices}
We say \textbf{$A=B$} if $A$ and $B$ are both $m\times n$ and $a_{ij}=b_{ij}$ for all $i,j$.
\end{definition}
\newpage
\subsection{Matrix Addition and Scalar Multiplication}
If $A$ and $B$ are both $m\times n$ matrices, then $A+B$ is the $m\times n$ matrix where:
$$\(A+B\)_{ij}=a_{ij}+b_{ij}$$
\begin{example}{Example: Adding Matrices}
$$\begin{bmatrix}1&2\\-1&3\\0&8\\4&9\end{bmatrix}+\begin{bmatrix}-2&9\\1&4\\2&2\\0&8\end{bmatrix}=\begin{bmatrix}-1&11\\0&7\\2&10\\4&17\end{bmatrix}$$
\end{example}
For $c\in\mathbb{R}$ and $m\times n$ matrix $A$, $cA$ is the $m\times n$ matrix where:
$$\(cA\)_{ij}=ca_{ij}$$
\begin{example}{Example: Scalar Multiplication With Matrices}
$$2\begin{bmatrix}-1&3&8\\4&9&1\end{bmatrix}=\begin{bmatrix}-2&6&16\\8&18&2\end{bmatrix}$$
\end{example}
Note: A column vector $\vv{v}\in\mathbb{R}^n$ is an $n\times 1$ matrix.
\begin{tasks}[label = $\star$]
\task {The properties of Matrix Addition and Scalar Multiplication correspond directly to the properties discussed in Section 1.1. For vector addition and scalar multiplication.}
\end{tasks}
\begin{definition}{Definition: Transpose}
Let $A$ be an $m\times n$ matrix. The $n\times m$ matrix $A^T$ (\textbf{``$A$ Transpose''}) is the matrix where:
$$\(A^T\)_{ij}=a_{ji}$$
\begin{example}{Example: Transpose}
\begin{itemize}
\item $A=\underbrace{\begin{bmatrix}2&-1&3\\0&4&5\end{bmatrix}}_{2\times3}$, $A^T=\underbrace{\begin{bmatrix}2&0\\-1&4\\3&5\end{bmatrix}}_{3\times2}$
\item $\vv{v}=\begin{bmatrix}1\\2\\3\\4\end{bmatrix}\text{Column Vector } 4\times1$, $\vv{v}^T=\underbrace{\begin{bmatrix}1&2&3&4\end{bmatrix}}_{\text{Row Vector } 1\times4}$
\end{itemize}
\end{example}
Notes:
\begin{itemize}
\item $\(A^T\)^T=A$
\item $\(A+B\)^T=A^T+B^T$
\end{itemize}
\end{definition}
\begin{definition}{Definition: Symmetric Matrix}
If $A=A^T$, we call $A$ a \textbf{symmetric matrix}.
\begin{example}{Example: Symmetric Matrix}
\begin{center}
$A=$\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=2pt]{A11}{1} & 3 & 8 \\
3 & 2 & 4 \\
8 & 4 & \tikzmarknode[inner sep=2pt]{A33}{9} \bigstrut[b] \\
\end{block}
\end{blockarray}
\end{center}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick, dotted] plot[smooth cycle] coordinates {(A11.north)
(A33.north east) (A33.south) (A11.south west)};
\end{tikzpicture}
\end{example}
\end{definition}
\newpage
\subsection{Matrix-Vector Products}
Let $A$ be a $m\times n$ matrix and $\vv{x}\in\mathbb{R}^n$ then:
\begin{align*}
A\vv{x}&=\begin{bmatrix}\vv{a_1},\vv{a_2},\vv{a_3},\hdots,\vv{a_n}\end{bmatrix}\begin{bmatrix}x_1\\x_2\\x_3\\\vdots\\x_n\end{bmatrix} \\
&=x_1\vv{a_1}+x_2\vv{a_2}+x_3\vv{a_3}+\hdots+x_n\vv{a_n}
\end{align*}
Notes:
\begin{itemize}
\item $A\vv{x}$ is a linear combination of the columns of $A$.
\item {$A\vv{x}$ is only defined if the number of columns in $A$ equals the number of rows/components in $\vv{x}$. For $A\vv{x}=\vv{b}$ to be defined then $A$ needs to be $m\times n$ and $\vv{x}$ needs to be $n\times 1$.}
\end{itemize}
\begin{example}{Example: Matrix-Vector Multiplication Example 1}
\begin{align*}
\underbrace{\begin{bmatrix}1&3&-1\\2&4&5\end{bmatrix}}_{\textcolor{red}{2}\times\textcolor{violet}{3}}\underbrace{\begin{bmatrix}-3\\0\\6\end{bmatrix}}_{\textcolor{violet}{3}\times\textcolor{red}{1}}&=-3\begin{bmatrix}1\\2\end{bmatrix}+0\begin{bmatrix}3\\4\end{bmatrix}+6\begin{bmatrix}-1\\5\end{bmatrix} \\
&=\begin{bmatrix}-3\\-6\end{bmatrix}+\vv{0}+\begin{bmatrix}-6\\30\end{bmatrix} \\
&=\underbrace{\begin{bmatrix}-9\\24\end{bmatrix}}_{\textcolor{red}{2}\times\textcolor{red}{1}}
\end{align*}
\end{example}
\begin{example}{Example: Matrix-Vector Multiplication Example 2}
\begin{align*}
\underbrace{\begin{bmatrix}1&3&8\\0&-1&1\\2&1&4\\1&2&1\end{bmatrix}}_{\textcolor{red}{4}\times\textcolor{violet}{3}}\underbrace{\begin{bmatrix}2\\-1\\1\end{bmatrix}}_{\textcolor{violet}{3}\times\textcolor{red}{1}}&=2\begin{bmatrix}1\\0\\2\\1\end{bmatrix}-1\begin{bmatrix}3\\-1\\1\\2\end{bmatrix}+1\begin{bmatrix}8\\1\\4\\1\end{bmatrix} \\
&=\begin{bmatrix}2\\0\\4\\2\end{bmatrix}-\begin{bmatrix}3\\-1\\1\\2\end{bmatrix}+\begin{bmatrix}8\\1\\4\\1\end{bmatrix}\\
&=\underbrace{\begin{bmatrix}7\\2\\7\\1\end{bmatrix}}_{\textcolor{red}{4}\times\textcolor{red}{1}}
\end{align*}
\end{example}
\begin{example}{Example: Matrix-Vector Multiplication Example 3}
\begin{center}
\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=2pt]{A11}{1} & \tikzmarknode[inner sep=2pt]{A12}{3} & \tikzmarknode[inner sep=2pt]{A13}{8} \\
0 & -1 & 1 \\
2 & 1 & 4 \\
1 & 2 & 1\bigstrut[b] \\
\end{block}
\end{blockarray}
\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=2pt]{B11}{2} \\ \tikzmarknode[inner sep=2pt]{B21}{-1} \\ \tikzmarknode[inner sep=2pt]{B31}{1} \bigstrut[b] \\
\end{block}
\end{blockarray}$=\begin{bmatrix}1\(2\)+3\(-1\)+8\(1\)\\0\(2\)-1\(-1\)+\(1\)\(1\)\\2\(2\)+1\(-1\)+4\(1\)\\1\(2\)+2\(-1\)+1\(1\)\end{bmatrix}=\begin{bmatrix}7\\2\\7\\1\end{bmatrix}$
\end{center}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(A11.north) (A12.north) (A13.north) (A13.east) (A13.south) (A12.south) (A11.south) (A11.west)};
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B11.east) (B21.east) (B31.east) (B31.south) (B31.west) (B21.west) (B11.west)};
\end{tikzpicture}
\end{example}
\newpage
\subsection{Matrix Multiplication}
Let $A$ be an $m\times \textcolor{red}{n}$ matrix and let $B$ be an $\textcolor{red}{n}\times q$ matrix. Then $AB$ is the $m\times q$ matrix where:
\begin{align*}
AB&=A\begin{bmatrix}\vv{b_1}&\vv{b_2}&\vv{b_3}&\hdots&\vv{b_q}\end{bmatrix} \\
&=\begin{bmatrix}A\vv{b_1}&A\vv{b_2}&A\vv{b_3}&\hdots&A\vv{b_q}\end{bmatrix}
\end{align*}
Notes:
\begin{itemize}
\item $AB=C$ where $A$ is $\textcolor{red}{m}\times\textcolor{violet}{n}$, $B$ is $\textcolor{violet}{n}\times\textcolor{red}{q}$ and $C$ is $\textcolor{red}{m}\times\textcolor{red}{q}$
\item {Question: If $A$ is $m\times n$ and $\vv{x}\in\mathbb{R}^n$, is $\vv{x}A$ defined? $\vv{x}A$ would be undefined as $\vv{x}$ is $\textcolor{red}{n}\times\textcolor{violet}{1}$ and $A$ is $\textcolor{violet}{m}\times \textcolor{red}{n}$ so unless $m=1$ then $\vv{x}A=\text{undefined}$. In conclusion $A\vv{x}\ne\vv{x}A$.}
\end{itemize}
\begin{example}{Example: Matrix Multiplication Example 1}
$$AB=\underbrace{\begin{bmatrix}1&3&-1\\2&0&4\end{bmatrix}}_{2\times\textcolor{red}{3}}\underbrace{\begin{bmatrix}-1&0&2&1\\1&1&2&-1\\0&1&-1&2\end{bmatrix}}_{\textcolor{red}{3}\times4}=\underbrace{\begin{bmatrix}2&2&9&-4\\-2&4&0&10\end{bmatrix}}_{2\times4}$$
\begin{align*}
\begin{bmatrix}1&3&-1\\2&0&4\end{bmatrix}\begin{bmatrix}-1\\1\\0\end{bmatrix}&=-1\begin{bmatrix}1\\2\end{bmatrix}+1\begin{bmatrix}3\\0\end{bmatrix}+0\begin{bmatrix}-1\\4\end{bmatrix}=\begin{bmatrix}2\\-2\end{bmatrix} \\
\begin{bmatrix}1&3&-1\\2&0&4\end{bmatrix}\begin{bmatrix}0\\1\\1\end{bmatrix}&= 0\begin{bmatrix}1\\2\end{bmatrix}+1\begin{bmatrix}3\\0\end{bmatrix}+1\begin{bmatrix}-1\\4\end{bmatrix}=\begin{bmatrix}2\\4\end{bmatrix}\\
\begin{bmatrix}1&3&-1\\2&0&4\end{bmatrix}\begin{bmatrix}2\\2\\-1\end{bmatrix}&= \begin{bmatrix}9\\0\end{bmatrix}\\
\begin{bmatrix}1&3&-1\\2&0&4\end{bmatrix}\begin{bmatrix}1\\-1\\2\end{bmatrix}&=\begin{bmatrix}-4\\10\end{bmatrix}
\end{align*}
\end{example}
Notes:
\begin{itemize}
\item $AB=C$ as $A$ is $2\times\textcolor{red}{3}$ and $B$ is $\textcolor{red}{3}\times4$, thus $C$ is $2\times4$
\item $BA=\text{undefined}$ as $B$ is $3\times\textcolor{red}{4}$ and $A$ is $\textcolor{red}{2}\times3$, thus as $\textcolor{red}{4}\ne\textcolor{red}{2}$ then $BA$ is undefined
\item In general, $AB\ne BA$ thus \textbf{order matters} for matrix multiplication.
\end{itemize}
\begin{example}{Example: Matrix Multiplication Example 2}
\begin{align*}
\text{\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{A11}{1} & \tikzmarknode[inner sep=3pt]{A12}{3} & \tikzmarknode[inner sep=3pt]{A13}{-1} \\
2 & 0 & 4 \\
\end{block}
\end{blockarray}
\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{B11}{-1} & 0 & 2 & 1 \\
\tikzmarknode[inner sep=3pt]{B21}{1} & 1 & 2 & -1\\
\tikzmarknode[inner sep=3pt]{B31}{0} & 1 & -1 & 2\bigstrut[b] \\
\end{block}
\end{blockarray}}&=\begin{bmatrix}-1+3+0&0+3-1&2+6+1&1-3-2\\-2+0+0&0+0+4&4+0-4&2+0+8\end{bmatrix} \\
&=\begin{bmatrix}2&2&9&-4\\-2&4&0&10\end{bmatrix}
\end{align*}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(A11.north) (A12.north) (A13.north) (A13.east) (A13.south) (A12.south) (A11.south) (A11.west)};
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B11.east) (B21.east) (B31.east) (B31.south) (B31.west) (B21.west) (B11.west)};
\end{tikzpicture}
\end{example}
\begin{example}{Example: Matrix Multiplication Example 3}
$$\underbrace{\begin{bmatrix}1&-1&0&2\\3&-2&4&1\\0&1&-1&2\end{bmatrix}}_{\textcolor{violet}{3}\times\textcolor{red}{4}}\underbrace{\begin{bmatrix}2&3\\1&-1\\0&2\\4&1\end{bmatrix}}_{\textcolor{red}{4}\times\textcolor{violet}{2}}=\underbrace{\begin{bmatrix}9&6\\8&20\\9&-1\end{bmatrix}}_{\textcolor{violet}{3}\times\textcolor{violet}{2}}$$
\begin{align*}
\begin{bmatrix}1&-1&0&2\\3&-2&4&1\\0&1&-1&2\end{bmatrix}\begin{bmatrix}2\\1\\0\\4\end{bmatrix}&=2\begin{bmatrix}1\\3\\0\end{bmatrix}+1\begin{bmatrix}-1\\-2\\1\end{bmatrix}+0\begin{bmatrix}0\\4\\-1\end{bmatrix}+4\begin{bmatrix}2\\1\\2\end{bmatrix}=\begin{bmatrix}9\\8\\9\end{bmatrix} \\
\text{\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{A11}{1} & \tikzmarknode[inner sep=3pt]{A12}{-1} & \tikzmarknode[inner sep=3pt]{A13}{0} & \tikzmarknode[inner sep=3pt]{A14}{2}\\
3 & -2 & 4 & 1\\
0 & 1 & -1 & 2\\
\end{block}
\end{blockarray}
\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{B11}{3}\\
\tikzmarknode[inner sep=3pt]{B21}{-1}\\
\tikzmarknode[inner sep=3pt]{B31}{2}\\
\tikzmarknode[inner sep=3pt]{B41}{1}\\
\end{block}
\end{blockarray}}&=\begin{bmatrix}3\(1\)-1\(-1\)+2\(0\)+1\(2\)\\3\(3\)-1\(-2\)+2\(4\)+1\(1\)\\3\(0\)-1\(1\)+2\(-1\)+1\(2\)\end{bmatrix}=\begin{bmatrix}6\\20\\-1\end{bmatrix}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(A11.north) (A12.north) (A13.north)(A14.north) (A14.east) (A14.south)(A13.south) (A12.south) (A11.south) (A11.west)};
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B11.east) (B21.east) (B31.east)(B41.east) (B41.south) (B41.west) (B31.west)(B21.west) (B11.west)};
\end{tikzpicture} \\
\text{\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{A11}{1} & \tikzmarknode[inner sep=3pt]{A12}{-1} & \tikzmarknode[inner sep=3pt]{A13}{0} & \tikzmarknode[inner sep=3pt]{A14}{2}\\
3 & -2 & 4 & 1\\
0 & 1 & -1 & 2\\
\end{block}
\end{blockarray}
\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{B11}{2} & 3\\
\tikzmarknode[inner sep=3pt]{B21}{1} & -1\\
\tikzmarknode[inner sep=3pt]{B31}{0} & 2\\
\tikzmarknode[inner sep=3pt]{B41}{4} & 1\\
\end{block}
\end{blockarray}}&=\begin{bmatrix}9&6\\8&20\\9&-1\end{bmatrix}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(A11.north) (A12.north) (A13.north)(A14.north) (A14.east) (A14.south)(A13.south) (A12.south) (A11.south) (A11.west)};
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B11.east) (B21.east) (B31.east)(B41.east) (B41.south) (B41.west) (B31.west)(B21.west) (B11.west)};
\end{tikzpicture} \\
\end{align*}
\end{example}
\newpage
\subsection{Properties of Matrix Multiplication}
Let $A,B,C$ be matrices of appropriate size. Let $k\in\mathbb{R}$.
\begin{enumerate}
\item $\(AB\)C=A\(BC\)$ (Associativity)
\item $\(AB\)^T=B^TA^T$
\item $A\(B+C\)=AB+AC$ (Left Distributivity)
\item $\(B+C\)A=BA+CA$ (Right Distributivity)
\item $k\(AB\)=\(kA\)B=A\(kB\)$
\item {If $A$ is $m\times n$, then $AI_n=I_mA=A$. With $I_n$ and $I_m$ being identity matrices. (Multiplicative Identity)}
\end{enumerate}
In General:
\begin{itemize}
\item $AB\ne BA$
\item If $AB=AC$, we cannot assume $B=C$.
\item If $AB=0$ (where $0$ is a zero matrix), we cannot assume either $A=0$ or $B=0$.
\end{itemize}
\begin{proof} \newline
\begin{itemize}
\item To prove $A\(B+C\)=AB+AC$ (Left Distributivity), we let the rows of $A$ be denoted by $\vv{A_i}$ and the columns of $B$ and $C$ by $b_i$ and $c_i$. Then the $j$th column of $B+C$ is $b_j+c_j$ (since addition is defined componentwise), and thus
\begin{align*}
\[A\(B+C\)\]_{ij}&=\vv{A_i}\cdot\(\vv{b_j}+\vv{c_j}\) \\
&=\vv{A_i}\cdot\vv{b_j}+\vv{A_i}\cdot\vv{c_j} \\
&=\(AB\)_{ij}+\(AC\)_{ij} \\
&=\(AB+AC\)_{ij}
\end{align*}
Since this is true for all $i$ and $j$, we must have $A\(B+C\)=AB+AC$.
\item To prove $AI_n=A$ (Multiplicative Identity), we note that the identity matrix $I_n$ can be column-partitioned as
$$I_n=\begin{bmatrix}\vv{e_1}&\vv{e_2}&\hdots&\vv{e_n}\end{bmatrix}$$
where $\vv{e_i}$ is a standard unit vector. Therefore,
\begin{align*}
AI_n&=\begin{bmatrix}A\vv{e_1}&A\vv{e_2}&\hdots&A\vv{e_n}\end{bmatrix} \\
&=\begin{bmatrix}\vv{a_1}&\vv{a_2}&\hdots&\vv{a_n}\end{bmatrix} \\
&=A
\end{align*}
by the fact that $A\vv{e_j}$ is the $j$th column of $A$.
\end{itemize}
\end{proof}
\begin{example}{Example: $AB\ne BA$}
\begin{align*}
\underbrace{\begin{bmatrix}1&-1\\0&0\end{bmatrix}}_{2\times2}\underbrace{\begin{bmatrix}1&1\\1&1\end{bmatrix}}_{2\times2}&=\underbrace{\begin{bmatrix}0&0\\0&0\end{bmatrix}}_{2\times2} \\
\underbrace{\begin{bmatrix}1&1\\1&1\end{bmatrix}}_{2\times2}\underbrace{\begin{bmatrix}1&-1\\0&0\end{bmatrix}}_{2\times2}&=\underbrace{\begin{bmatrix}1&-1\\1&-1\end{bmatrix}}_{2\times2}
\end{align*}
\end{example}
\begin{example}{Example: \text{$AB=AC$} but $B\ne C$}
$$A=\begin{bmatrix}1&0\\0&0\end{bmatrix}\text{ }B=\begin{bmatrix}2&3\\4&5\end{bmatrix}\text{ }C=\begin{bmatrix}2&3\\0&8\end{bmatrix}$$
\begin{align*}
AC&=\begin{bmatrix}1&0\\0&0\end{bmatrix}\begin{bmatrix}2&3\\0&8\end{bmatrix}=\begin{bmatrix}2&3\\0&0\end{bmatrix} \\
AB&=\begin{bmatrix}1&0\\0&0\end{bmatrix}\begin{bmatrix}2&3\\4&5\end{bmatrix}=\begin{bmatrix}2&3\\0&0\end{bmatrix}
\end{align*}
$$AB=AC \text{ but } B\ne C$$
\end{example}
\newpage
\subsection{Properties of Matrix Powers}
If $A$ is $n\times n$, then:
$$\begin{aligned}
A^2&=AA \\
A^3&=AAA = A^2A\\
A^4&=AAAA=A^2A^2=A^3A \\
&=\hdots
\end{aligned}$$
Properties for $r$ and $s$ are positive integers:
\begin{itemize}
\item $A^rA^s = A^{r+s}$
\item $\(A^r\)^s=A^{rs}$
\end{itemize}
\begin{example}{Example: Matrix Squared}
$$\begin{bmatrix}-1&2\\3&1\end{bmatrix}^2=\begin{bmatrix}-1&2\\3&1\end{bmatrix}\begin{bmatrix}-1&2\\3&1\end{bmatrix}=\begin{bmatrix}7&0\\0&7\end{bmatrix}=7I_2$$
\end{example}
If $A$ wasn't a square matrix, e.g. $3\times2$, then you can't multiply it by itself as there aren't as many columns as there are rows, $\(3\times\textcolor{red}{2}\)\(\textcolor{red}{3}\times2\)$.
\begin{example}{Example: Properties of Matrix Powers}
Simplify $\(A+B\)^2$. (where $A$ and $B$ are $n\times n$)
$$\(A+B\)\(A+B\)=A^2+AB+BA+B^2$$
Do note that it is not $A^2+2AB+B^2$ as order matters in matrix multiplication.
$$\(AB\)^2=ABAB\ne A^2B^2$$
\end{example}
Consider The Question: Is $\begin{bsmallmatrix}3\\1\\-4\end{bsmallmatrix}$ a linear combination of $\begin{bsmallmatrix}2\\-1\\2\end{bsmallmatrix}$ and $\begin{bsmallmatrix}1\\1\\3\end{bsmallmatrix}$?
\begin{itemize}
\item {Are there $x,y\in\mathbb{R}$ such that:
$$x\begin{bmatrix}2\\-1\\2\end{bmatrix}+y\begin{bmatrix}1\\1\\3\end{bmatrix}=\begin{bmatrix}3\\1\\-4\end{bmatrix}\text{? Vector Form}$$}
\item {Are there $x,y\in\mathbb{R}$ such that:
$$\begin{bmatrix}2&1\\-1&1\\2&3\end{bmatrix}\begin{bmatrix}x\\y\end{bmatrix}=\begin{bmatrix}3\\1\\-4\end{bmatrix}\text{? Matrix-Form or Matrix-Vector Form}$$}
\item {Are there $x,y\in\mathbb{R}$ such that:
$$\sysdelim..\systeme{
2x+y=3,
-x+y=1,
2x+3y=-4
}\text{Equation Form}$$}
\end{itemize}
\newpage
\section{Section 2.1 Intro to Linear Systems}
\begin{definition}{Definition: Linear Equation}
A \textbf{linear equation} in the $n$ \textbf{variables} $x_1,x_2,\hdots,x_n$ is an equation that can be written in the form:
$$a_1x_1+a_2x_2+\hdots+a_nx_n=b$$
where $a_1,a_2,\hdots,a_n$ are \textbf{coefficients} and $b$ is the \textbf{constant term}.
\begin{example}{Example: Linear Equation}
\begin{tblr}{width = \textwidth, colspec={XX}, cells = {halign = c, valign = m}}
{\raisebox{-0.5\height}{$\begin{aligned}
2x&=3y+1 \\
2y-3y&=1
\end{aligned}$}} & {$$xy=1$$ Non Linear} \\
{Linear}
\end{tblr}
\end{example}
\end{definition}
\begin{definition}{Definition: Solution}
A \textbf{solution} to a linear equation is an assignment of the variables that results in a true statement.
\begin{example}{Example: Solution}
$$x+y+z=1$$
\begin{tblr}{width = \textwidth, colspec={X|X|X}, cells = {halign = c, valign = m, mode = math}}
{\begin{aligned}x&=4\\y&=-2\\z&=-1\end{aligned}} & {\begin{aligned}x&=\frac{1}{3}\\y&=\frac{1}{3}\\z&=\frac{1}{3}\end{aligned}} & {\begin{aligned}x&=0\\y&=0\\z&=1\end{aligned}}
\end{tblr}
3 distinct solutions (but many others exist)
\end{example}
\end{definition}
\begin{definition}{Definition: Linear System/System of Linear Equations}
A set of $m$ linear equations in $n$ variables $x_1,x_2,\hdots,x_n$ is called an $m\times n$ \textbf{linear system/system of linear equations}. \\
\newline
A \textbf{solution} to an $m\times n$ linear system is an assignment of the variables that simultaneously satisfies \textbf{all} $m$ equations.
\begin{example}{Example: Linear System/System of Linear Equations}
$$\sysdelim..\systeme{
x+y+z=1,
2x-y+z=2
}$$
Is $x=4$, $y=-2$, $z=-1$ a solution to this system? \textbf{No}, as the assignment doesn't satisfy both equations.
\begin{align*}
x+y+z&=4-2-1=1 \text{ }\checkmark \\
2x-y+z&=2\(4\)-\(-2\)+\(-1\)=9\ne2
\end{align*}
Is $x=1$, $y=0$, $z=0$ a solution? \textbf{Yes}
\begin{align*}
x+y+z&=1+0+0=1\text{ }\checkmark \\
2x-y+z&=2\(1\)-0+0=2\text{ }\checkmark
\end{align*}
\end{example}
\end{definition}
\begin{example}{Example: Solution to System of Equations}
Find all solutions to:
$$\begin{cases}
\sysdelim..\systeme{
3x+y=9,
x-2y=-4
}
\end{cases}$$
$$\begin{aligned}
3x+y&=9 & x-2y&=-4 \\
y&=9-3x\Rightarrow& x-2\(9-3x\)&=-4 \\
y&=9-3\(2\)& x-18+6x&=-4 \\
\Aboxed{y&=3} & 7x&=14 \\
& & \Leftarrow \Aboxed{x&=2}
\end{aligned}$$
One Unique Solution, which can also be seen graphically
\begin{center}
\begin{tikzpicture}
\begin{axis} [
width = 0.5\linewidth,
axis x line=middle,
axis y line=middle,
ymin = -10, ymax = 10,
xmin = -10, xmax = 10,
xlabel = {$x$},
ylabel = {$y$},
%xtick = {1, 2, 3, 4, 5, 6},
%ytick = {-1, 1, 2, 3, 4, 5, 6, 7, 8},
%xticklabels={},
every axis plot post/.append style={ultra thick},
clip = true,
axis line style = {Stealth-Stealth, thick},
legend style = {draw = none},
legend pos = outer north east
]
\addplot [
black,
samples = 200,
style = {Stealth-Stealth},
domain = -1/3:19/3,
]
{9-(3*x)};
\addplot [
black,
style = {Stealth-Stealth},
samples = 200,
domain = -10:10,
]
{(-4-x)/-2};
\addplot[mark=*] coordinates {(2,3)} node[below right] {$\(2,3\)$};
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}
\begin{tblr}{width = \linewidth, colspec = {XXX}, cells = {halign = c, valign = m}}
{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -5, ymax = 5,
xmin = -5, xmax = 5,
xlabel = {$x$},
ylabel = {$y$},
xtick = {100},
ytick = {100},
%xticklabels={},
every axis plot post/.append style={ultra thick},
clip = true,
axis line style = {Stealth-Stealth, thick},
legend style = {draw = none},
legend pos = outer north east
]
\addplot [
black,
style = {Stealth-Stealth},
samples = 200,
domain = -5:4,
] {x+1};
\addplot [
black,
style = {Stealth-Stealth},
samples = 200,
domain = -4:5,
] {x-1};
\end{axis}
\end{tikzpicture}} &
{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -5, ymax = 5,
xmin = -5, xmax = 5,
xlabel = {$x$},
ylabel = {$y$},
xtick = {100},
ytick = {100},
%xticklabels={},
every axis plot post/.append style={ultra thick},
clip = true,
axis line style = {Stealth-Stealth, thick},
legend style = {draw = none},
legend pos = outer north east
]
\addplot [
black,
style = {Stealth-Stealth},
samples = 200,
domain = -5:5,
] {x};
\addplot [
black,
style = {Stealth-Stealth},
samples = 200,
domain = -4:5,
] {-x+1};
\addplot[mark=*] coordinates {(0.5,0.5)};
\end{axis}
\end{tikzpicture}} &
{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -5, ymax = 5,
xmin = -5, xmax = 5,
xlabel = {$x$},
ylabel = {$y$},
xtick = {100},
ytick = {100},
%xticklabels={},
every axis plot post/.append style={ultra thick},
clip = true,
axis line style = {Stealth-Stealth, thick},
legend style = {draw = none},
legend pos = outer north east
]
\addplot [
black,
style = {Stealth-Stealth},
samples = 200,
domain = -5:5,
] {x};
\end{axis}
\end{tikzpicture}} \\
{No Solution} & {One Unique Solution} & {Infinitely Many Solutions}
\end{tblr} \newline
Any $m\times n$ linear system must have either no solution (inconsistent system), one unique solution, or infinitely many solutions. With one unique solution and infinitely many solutions being consistent systems.
\begin{example}{Example: Back and Forward Substitution}
Solve the following linear system: \\
\begin{tblr}{width = \linewidth, colspec = {X|X}, cells = {halign = l, valign = m}, row{2} = {halign = c}}
{$$\sysdelim..\systeme{
x+y-z=4,
2y+z=10,
3z=12
}$$ Upper Triangular Form so use Back-Substitution:} & {$$\sysdelim..\systeme{
x=3,
x-2y=9,
x+y-z=4
}$$Lower Triangular Form so use Forward-Substitution:} \\
{$\begin{aligned}
2y+4&=10 & x+3-4&=4 \\
2y&=6 & x&=5 \\
y&=3
\end{aligned}$ \\
\fbox{$\begin{aligned}x&=5\\y&=3\\z&=4\end{aligned}$}\\\newline One Unique Solution} &
{$\begin{aligned}
3-2y&=9 & 3-3-z&=4 \\
-2y&=6 & z&=-4 \\
y&=-3
\end{aligned}$ \\
\fbox{$\begin{aligned}x&=3\\y&=-3\\z&=-4\end{aligned}$}\\\newline One Unique Solution}
\end{tblr}
\end{example}
\begin{example}{Example: Getting a System of Equations into Back-Substitution}
Solve the following linear system:
$$\sysdelim..\sysautonum{\(*\)}\systeme{
-x-2y+z=1,
x+y-2z=1,
2x+6y+2z=0
}$$ First goal is to get the system into Upper Triangular Form, to do this first add equation $\(1\)$ and $\(2\)$ to get:
$$\(1\)+\(2\)=0-y-z=2$$
Next add $2\(1\)$ and $\(3\)$ to get:
$$2\(1\)+\(3\)=0+2y+4z=2$$
Combining everything into a new system of equations to get:
$$\sysdelim..\sysautonum{\(*\)}\systeme{
-x-2y+z=1,
-y-z=2,
2y+4z=2
}$$
Next add $2\(2\)$ and $\(3\)$ to get:
$$2\(2\)+\(3\)=0+2z=6$$
Combining everything into a new system of equations to get:
$$\sysdelim..\systeme{
-x-2y+z=1,
-y-z=2,
2z=6
}$$
And using Back-Substitution to solve gets
\fbox{$\begin{aligned}x&=12\\y&=-5\\z&=3\end{aligned}$}. This can also be written in different forms:
\begin{itemize}
\item {Equation Form:
$$\sysdelim..\systeme{
-x-2y+z=1,
x+y-2z=1,
2x+6y+2z=0
}$$}
\item {Vector Form:
$$x\begin{bmatrix}-1\\1\\2\end{bmatrix}+y\begin{bmatrix}-2\\1\\6\end{bmatrix}+z\begin{bmatrix}1\\-2\\2\end{bmatrix}=\begin{bmatrix}1\\1\\0\end{bmatrix}$$}
\item {Matrix Form/Matrix-Vector Form
$$\begin{bmatrix}-1&-2&1\\1&1&-2\\2&6&2\end{bmatrix}\begin{bmatrix}x\\y\\z\end{bmatrix}=\begin{bmatrix}1\\1\\0\end{bmatrix}$$}
\item {Solution in Equation Form:$$\begin{aligned}x&=12\\y&=-5\\z&=3\end{aligned}$$}
\item {Solution in Vector Form:
\begin{align*}
\vv{x}&=\begin{bmatrix}12\\-5\\3\end{bmatrix} \\
\begin{bmatrix}-1&-2&1\\1&1&-2\\2&6&2\end{bmatrix}\begin{bmatrix}12\\-5\\3\end{bmatrix}&=\begin{bmatrix}1\\1\\0\end{bmatrix}
\end{align*}}
\end{itemize}
\end{example}
\begin{example}{Example: Bound and Free Variables}
Solve the following linear system:
$$\sysdelim..\systeme{
x_1+x_2-x_3+x_4=0,
x_2+x_3+2x_4=0
}$$
As you can't find $x_4$ and $x_3$ then you make them a variable say $t$ and $s$ with $t,s\in\mathbb{R}$ giving:
$$\begin{aligned}x_3&=s\\x_4&=t\end{aligned}$$
Now solving for $x_2$ gives:
\begin{align*}
x_2+s+2t&=0 \\
x_2&=-s-2t
\end{align*}
Now solving for $x_1$ gives:
\begin{align*}
x_1+\(-s-2t\)-s+t&=0\\
x_1-2s-t&=0\\
x_1&=2s+t
\end{align*}
\begin{itemize}
\item {Parametric Equation Form of Solution Set
\begin{center}\fbox{$\begin{aligned}
x_1&=2s+t\\
x_2&=-s-2t\\
x_3&=s\\
x_4&=t
\end{aligned}$}\end{center}}
\item {Parametric Vector Form of The Solution Set
\begin{align*}
\vv{x}&=\begin{bmatrix}2s+t\\-s-2t\\s\\t\end{bmatrix}\\
&=\begin{bmatrix}2s\\-s\\s\\0\end{bmatrix}+\begin{bmatrix}t\\-2t\\0\\t\end{bmatrix}\\
\Aboxed{\vv{x}&=s\begin{bmatrix}2\\-1\\1\\0\end{bmatrix}+t\begin{bmatrix}1\\-2\\0\\1\end{bmatrix}}
\end{align*}}
\end{itemize}
In this example $x_1$ and $x_2$ are called \textbf{bound variables}. \\
\newline
The non-bound variables are called \textbf{free variables}. We assign parameters to free variables. ($x_3$ and $x_4$ are free variables)
\end{example}
\begin{example}{Example: No Solution}
Solve the following linear system:
$$\sysdelim..\systeme{
4x-8y=6,
2x-4y=4
}$$
Try to get the equation into back substitution form by canceling the $2x$ in the second equation:
\begin{center}
\begin{tabular}{r}
{$-\frac{1}{2}\(4x-8y=6\)$} \\
{$+1\(2x-4y=4\)$} \\ \hline
{$0+0=1$}
\end{tabular}
\end{center}
This now makes the linear system:
\begin{align*}
4x-8y&=6\\
\Aboxed{0&=1}
\end{align*}
As $0\ne1$ then the system has no solution, which also makes it inconsistent.
\end{example}
\newpage
\section{Section 2.2 Solving Linear Systems}
\begin{definition}{Definition: Augmented Matrix}
Consider the linear system $A\vv{x}=\vv{b}$ where $A$ is an $m\times n$ matrix and $\vv{b}\in\mathbb{R}^m$. $A$ is called the \textbf{coefficient matrix} corresponding to the linear system. \\
\newline
$\[A\Big|\vv{b}\]$ is called the \textbf{augmented matrix} corresponding to the linear system.
\begin{example}{Example: Augmented Matrix}
$$\sysdelim..\systeme{
2x-3y+0z=4,
4x+y-5z=0
}$$
\begin{itemize}
\item {$\underbrace{\begin{bmatrix}2&-3&0\\4&1&-5\end{bmatrix}}_{\text{Coefficient Matrix}}\begin{bmatrix}x\\y\\z\end{bmatrix}=\begin{bmatrix}4\\0\end{bmatrix}$}
\item {$\underbrace{\begin{bmatrix}2&-3&0&\aug&4\\4&1&-5&\aug&0\end{bmatrix}}_{\text{Augmented Matrix}}$}
\end{itemize}
\end{example}
\end{definition}
\begin{example}{Example: Augmented Matrix To System Of Equations With A Unique Solution}
Solve the linear system corresponding to the given augmented matrix:
\begin{center}
$\begin{bNiceMatrix}% don't forget the %
[first-row,]
x& y& z&&\text{constants} \\
1 & 1 & 1 & \aug&1 \\
0 & 2 & 1 &\aug& 0 \\
0 & 0 & -1 & \aug&2
\end{bNiceMatrix}$
$$\sysdelim..\systeme{
x+y+z=1,
2y+z=0,
-z=2
}$$Use Back-Substitution \\ \newline
\fbox{$\begin{aligned}x&=2\\y&=1\\z&=-2\end{aligned}$} $\vv{x}=\begin{bmatrix}2\\1\\-2\end{bmatrix}$ \\ \newline
One Unique Solution
\end{center}
\end{example}
\begin{example}{Example: Augmented Matrix to System of Equations With Infinite Solutions}
Solve the linear system corresponding to the given augmented matrix:
\begin{center}
$\begin{bNiceMatrix}% don't forget the %
[first-row,]
a& b& c&d&&\text{constants} \\
1 & 2 & 0 &0& \aug&5 \\
0 & 0 & 1 &0&\aug& 3 \\
0 & 0 & 0 &1& \aug&-2
\end{bNiceMatrix}$
$$\sysdelim..\systeme{
a+2b=5,
c=3,
d=-2
}$$
\fbox{$\begin{aligned}a&=5-2t\\b&=t\\c&=3\\d&=-2\end{aligned}$} ($a,c,d$ are \textbf{bound variables}. $b$ is a \textbf{free variable}.) \\ \newline
Infinitely Many Solutions \\ \newline
$\begin{aligned}
\vv{x}&=\begin{bmatrix}5-2t\\t\\3\\-2\end{bmatrix} \\
&=\begin{bmatrix}5\\0\\3\\-2\end{bmatrix}+\begin{bmatrix}-2t\\t\\0\\0\end{bmatrix} \\
\Aboxed{&=\begin{bmatrix}5\\0\\3\\-2\end{bmatrix}+t\begin{bmatrix}-2\\1\\0\\0\end{bmatrix}}
\end{aligned}$\\\newline Parametric Vector Form of Solution Set
\end{center}
\end{example}
\begin{definition}{Definition: Row Echelon Form (REF)}
A matrix is said to be in \textbf{row echelon form (REF)} if:
\begin{enumerate}
\item Any zero rows, rows consisting entirely of zeros, in the matrix are at the bottom.
\item In each nonzero row, the first nonzero entry (called the \textbf{leading entry}) is in a column to the left of all leading entries below it.
\end{enumerate}
\begin{example}{Example: What is and isn't in REF}
\begin{multicols}{2}
\begin{itemize}
\item {$\underbrace{\begin{bNiceMatrix}
2&0&3&9\\
0&0&4&0\\
0&0&0&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-3) circle (2mm) ;
\tikz \draw (3-4) circle (2mm) ;
\end{bNiceMatrix}}_{\text{REF}}$}
\item {$\underbrace{\begin{bNiceMatrix}
0&2\\
0&0\\
0&0
\CodeAfter
\tikz \draw (1-2) circle (2mm) ;
\end{bNiceMatrix}}_{\text{REF}}$}
\item {$\underbrace{\begin{bNiceMatrix}
2&0&3&1\\
0&3&1&1\\
0&2&0&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-2) circle (2mm) ;
\end{bNiceMatrix}}_{\text{\textbf{Not} REF}}$}
\item {$\underbrace{ \text{\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
0 & 1 & 0 & 0\\
\tikzmarknode[inner sep=3pt]{A11}{0} & \tikzmarknode[inner sep=3pt]{A12}{0} & \tikzmarknode[inner sep=3pt]{A13}{0} & \tikzmarknode[inner sep=3pt]{A14}{0}\\
0 & 0 & 3 & 1\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(A11.north) (A12.north) (A13.north)(A14.north) (A14.east) (A14.south)(A13.south) (A12.south) (A11.south) (A11.west)};
\end{tikzpicture}}_{\text{\textbf{Not} REF}}$}
\end{itemize}
\end{multicols}
\end{example}
\end{definition}
\begin{definition}{Definition: Reduced Row Echelon Form (RREF)}
A matrix is in \textbf{reduced row echelon form (RREF)} if:
\begin{enumerate}
\item It is in REF
\item The leading entry in each nonzero row is one (called a \textbf{leading one})
\item Each leading one is the only nonzero entry in its column.
\end{enumerate}
\begin{example}{Example: Reduced Row Echelon Form}
$$\begin{bNiceMatrix}
1&0&2&3&0 \\
0&1&-1&0&0 \\
0&0&0&0&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-5) circle (2mm) ;
\end{bNiceMatrix}$$
\end{example}
\end{definition}
Notes:
\begin{itemize}
\item The leading entries in a REF or RREF of a matrix are called \textbf{pivots}.
\item Given a nonzero matrix, there are infinitely many REF of the matrix, but only one \textbf{unique} RREF.
\end{itemize}
Given a matrix, we can get a REF of the matrix using \textbf{3 elementary row operations}:
\begin{enumerate}
\item {\textbf{Row Interchange:} $R_i\leftrightarrow R_j$
\begin{example}{Example: Row Interchange}
$$\begin{bmatrix}0&2\\1&3\end{bmatrix}\xrightarrow{R_1\leftrightarrow R_2}\begin{bmatrix}1&3\\0&2\end{bmatrix}$$
\end{example}}
\item {Scale a row by a \textbf{nonzero} scalar $k$: $kR_i$
\begin{example}{Example: Scale Row}
$$\begin{bmatrix}2&4\\0&1\end{bmatrix}\xrightarrow{\frac{1}{2}R_1}\begin{bmatrix}1&2\\0&1\end{bmatrix}$$
\end{example}}
\item {\textbf{Row Replacement: } $R_i+kR_j$ (replaces $R_i$)
\begin{example}{Example: Row Replacement}
$$\begin{bmatrix}1&3\\-2&4\end{bmatrix}\xrightarrow{R_2+2R_1}\begin{bmatrix}1&3\\0&10\end{bmatrix}$$
\end{example}It is also the equivalent of:
\begin{tabular}{r}
{$\begin{pmatrix}-2&4\end{pmatrix}$} \\
{$+2\begin{pmatrix}1&3\end{pmatrix}$} \\ \hline
{$\begin{matrix}0&10\end{matrix}$}
\end{tabular}}
\end{enumerate}
\begin{example}{Example: Find The REF And RREF Of A Matrix}
Find a REF of the following matrix:
$$\begin{bmatrix}1&2&-1&-1\\3&5&-2&-1\\2&2&1&1\end{bmatrix}$$
\begin{flalign*}
&\begin{bNiceMatrix}
1&2&-1&-1\\
\underline{3}&5&-2&-1\\
\underline{2}&2&1&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}\noindent\xrightarrow{R_2-3R_1}\begin{bNiceMatrix}
1&2&-1&-1\\
0&\underline{-1}&1&2\\
\underline{2}&2&1&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}\xrightarrow{R_3-2R_1}\begin{bNiceMatrix}
1&2&-1&-1\\
0&\underline{-1}&1&2\\
0&\underline{-2}&3&3
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (3mm) ;
\end{bNiceMatrix} \\
&\xrightarrow{R_3-2R_2}\underbrace{\begin{bNiceMatrix}
1&2&-1&-1\\
0&-1&1&2\\
0&0&1&-1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (3mm) ;
\tikz \draw (3-3) circle (2mm) ;
\end{bNiceMatrix}}_{\text{REF}}\xrightarrow{-R_2}\begin{bNiceMatrix}
1&2&-1&-1\\
0&1&-1&-2\\
0&0&1&-1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-3) circle (2mm) ;
\end{bNiceMatrix}\xrightarrow{R_1-2R_2}\begin{bNiceMatrix}
1&0&1&3\\
0&1&-1&-2\\
0&0&1&-1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-3) circle (2mm) ;
\end{bNiceMatrix}\\
&\xrightarrow[R_1-R_3]{R_2+R_3}\underbrace{\begin{bNiceMatrix}
1&0&0&4\\
0&1&0&-3\\
0&0&1&-1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-3) circle (2mm) ;
\end{bNiceMatrix}}_{\text{RREF}}&&
\end{flalign*}
\end{example}
\begin{example}{Example: Find The RREF Of A Matrix}
Row reduce the matrix below to its RREF:
$$\begin{bmatrix}1&-2&1&4&1\\-1&2&1&2&0\\2&-4&0&2&1\end{bmatrix}$$
\begin{align*}
&\begin{bNiceMatrix}
1&-2&1&4&1\\
-1&2&1&2&0\\
2&-4&0&2&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}\xrightarrow[R_2+R_1]{R_3-2R_1}\begin{bNiceMatrix}
1&-2&1&4&1\\
0&0&2&6&1\\
0&0&\underline{-2}&-6&-1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-3) circle (2mm) ;
\end{bNiceMatrix}\xrightarrow{\frac{1}{2}R_2}\begin{bNiceMatrix}
1&-2&1&4&1\\
0&0&1&3&\frac{1}{2}\\
0&0&\underline{-2}&-6&-1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-3) circle (2mm) ;
\end{bNiceMatrix}\\
&\xrightarrow[R_1-R_2]{R_3+2R_2}\begin{bNiceMatrix}
1&-2&0&1&\frac{1}{2}\\
0&0&1&3&\frac{1}{2}\\
0&0&0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-3) circle (2mm) ;
\end{bNiceMatrix}&&
\end{align*}
\end{example}
\begin{definition}{Definition: Row Equivalent}
The matrices $A$ and $B$ are \textbf{row equivalent} if there is a sequence of elementary row operations that transforms $A$ into $B$.
\end{definition}
Notes:
\begin{itemize}
\item If $\[A\Big|\vv{b}\]$ and $\[C\Big|\vv{d}\]$ are \textbf{row equivalent}, then the linear systems $A\vv{x}=\vv{b}$ and $C\vv{x}=\vv{d}$ have the \textbf{same solution set}.
\end{itemize}
\begin{example}{Example: Row Equivalent Matrices Sharing A Solution Set}
Provided:
$$\begin{bmatrix}1&2&-1&\aug&-1\\3&5&-2&\aug&-1\\2&2&1&\aug&1\end{bmatrix}\xrightarrow{\text{Row Equivalent}}\begin{bmatrix}1&0&0&\aug&4\\0&1&0&\aug&-3\\0&0&1&\aug&-1\end{bmatrix}$$
The matrix $\begin{bmatrix}1&0&0&\aug&4\\0&1&0&\aug&-3\\0&0&1&\aug&-1\end{bmatrix}$ has the solution set of \fbox{$\begin{aligned}x_1&=4\\x_2&=-3\\x_3&=-1\end{aligned}$} and as $\begin{bmatrix}1&2&-1&\aug&-1\\3&5&-2&\aug&-1\\2&2&1&\aug&1\end{bmatrix}$ is row equivalent then they have the same solution set thus:
\begin{align*}
\begin{bmatrix}1&2&-1\\3&5&-2\\2&2&1\end{bmatrix}\begin{bmatrix}x_1\\x_2\\x_3\end{bmatrix}&=\begin{bmatrix}-1\\-1\\1\end{bmatrix} \\
\begin{bmatrix}1&2&-1\\3&5&-2\\2&2&1\end{bmatrix}\fbox{$\begin{bmatrix}4\\-3\\-1\end{bmatrix}$}&=\begin{bmatrix}-1\\-1\\1\end{bmatrix}\checkmark
\end{align*}
\end{example}
\begin{example}{Example: Row Equivalent Matrices Sharing Infinitely Many Solutions}
Provided:
$$\begin{bmatrix}1&-2&1&4&\aug&1\\-1&2&1&2&\aug&0\\2&-4&0&2&\aug&1\end{bmatrix}\xrightarrow{\text{Row Equivalent}}\begin{bNiceMatrix}[first-row]
&\downarrow&&\downarrow&&\\
1&-2&0&1&\aug&\frac{1}{2}\\
0&0&1&3&\aug&\frac{1}{2}\\
0&0&0&0&\aug&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-3) circle (2mm) ;
\end{bNiceMatrix}$$
$x_1$ and $x_3$ are bound variables, as they have a leading coefficient \\
$x_2$ and $x_4$ are free variables, as they don't have a leading coefficient \\
This can also be seen in the system of equations below:
$$\sysdelim..\systeme{
x_1-2x_2+x_4=\frac{1}{2},
x_3+3x_4=\frac{1}{2},
0=0
}$$
This makes
\begin{align*}
x_1&=\frac{1}{2}+2x_2-x_4 \\
x_3&=\frac{1}{2}-3x_4
\end{align*}
Giving the solution set of \fbox{$\begin{aligned}x_1&=\frac{1}{2}+2s-t\\x_2&=s\\x_3&=\frac{1}{2}-3t\\x_4&=t\end{aligned}$} or Infinitely Many Solutions, which can also be written in Parametric Vector Form:
\begin{align*}
\vv{x}&=\begin{bmatrix}\frac{1}{2}+2s-t\\s\\\frac{1}{2}-3t\\t\end{bmatrix} \\
&=\begin{bmatrix}\frac{1}{2}\\0\\\frac{1}{2}\\0\end{bmatrix}+\begin{bmatrix}2s\\s\\0\\0\end{bmatrix}+\begin{bmatrix}-t\\0\\-3t\\t\end{bmatrix} \\
\Aboxed{&=\begin{bmatrix}\frac{1}{2}\\0\\\frac{1}{2}\\0\end{bmatrix}+s\begin{bmatrix}2\\1\\0\\0\end{bmatrix}+t\begin{bmatrix}-1\\0\\-3\\1\end{bmatrix}}
\end{align*}
\end{example}
\begin{example}{Example: Row Reducing With No Solution}
$$\begin{bNiceMatrix}
1&2&-1&1&\aug&5\\
\underline{2}&3&-1&4&\aug&8\\
\underline{1}&4&-3&-3&\aug&6
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}\xrightarrow[R_2-2R_1]{R_3-R_1}\begin{bNiceMatrix}
1&2&-1&1&\aug&5\\
0&-1&1&2&\aug&-2\\
0&\underline{2}&-2&-4&\aug&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (3mm) ;
\end{bNiceMatrix}\xrightarrow{R_3+2R_2}\begin{bNiceMatrix}[first-row]
&&&&&\downarrow \\
1&2&-1&1&\aug&5\\
0&-1&1&2&\aug&-2\\
0&0&0&0&\aug&-3
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (3mm) ;
\tikz \draw (3-6) circle (3mm) ;
\end{bNiceMatrix}$$
Giving the system of equations:
$$\sysdelim..\systeme{
x_1+2x_2-x_3+x_4=5,
-x_2+x_3+2x_4=-2,
0=-3
}$$
No Solution as $0\ne-3$
\end{example}
\begin{definition}{Definition: Homogeneous Linear System}
An $m\times n$ linear system of the form $A\vv{x}=\vv{0}$ is called a \textbf{homogeneous linear system}. (all constant terms are equal to zero)
\end{definition}
\begin{definition}{Definition: Non-Homogeneous Linear System}
If at least one constant term is non zero in the linear system, we say the linear system is \textbf{non-homogeneous}. $\(A\vv{x}=\vv{b}\text{ where }\vv{b}\ne\vv{0}\) $
\end{definition}
Note that $A\vv{x}=\vv{0}$ is \textbf{always} consistent because $\vv{x}=\vv{0}$ is a solution to $A\vv{x}=\vv{0}$ $\(A\vv{0}=\vv{0}\)$ with $\vv{x}=\vv{0}$ being called the \textbf{trivial solution}.
\begin{example}{Example: Solving A Homogeneous And Non Homogeneous Linear System}
Solve the linear systems corresponding to the given augmented matrices: \\ \newline
\begin{tblr}{width = \linewidth, colspec = {X|X}, cells = {valign = m, halign = c}}
{$\begin{aligned}
&\begin{bNiceMatrix}
1&2&3&\aug&0\\
\underline{-1}&4&3&\aug&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix} \\
\xrightarrow{R_2+R_1}&\begin{bNiceMatrix}
1&2&3&\aug&0\\
0&6&6&\aug&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix} \\
\xrightarrow{\frac{1}{6}R_2}&\begin{bNiceMatrix}
1&2&3&\aug&0\\
0&1&1&\aug&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix} \\
\xrightarrow{R_1-2R_2}&\begin{bNiceMatrix}
1&0&1&\aug&0\\
0&1&1&\aug&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix}
\end{aligned}$\\ \newline
$\begin{aligned}
x_1+x_3&=0\Longrightarrow x_1=-x_3 \\
x_2+x_3&=0\Longrightarrow x_2=-x_3
\end{aligned}$\\ \newline
\fbox{$\begin{aligned}x_1&=-t\\x_2&=-t\\x_3&=t\end{aligned}\hspace{1cm}\vv{x}=t\begin{bmatrix}-1\\-1\\1\end{bmatrix}$}} &
{$\begin{aligned}
&\begin{bNiceMatrix}
1&2&3&\aug&5\\
\underline{-1}&4&3&\aug&7
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix} \\
\xrightarrow{R_2+R_1}&\begin{bNiceMatrix}
1&2&3&\aug&5\\
0&6&6&\aug&12
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix} \\
\xrightarrow{\frac{1}{6}R_2}&\begin{bNiceMatrix}
1&2&3&\aug&5\\
0&1&1&\aug&2
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix} \\
\xrightarrow{R_1-2R_2}&\begin{bNiceMatrix}
1&0&1&\aug&1\\
0&1&1&\aug&2
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix}
\end{aligned}$\\ \newline
$\begin{aligned}
x_1+x_3&=1\Longrightarrow x_1=1-x_3 \\
x_2+x_3&=2\Longrightarrow x_2=2-x_3
\end{aligned}$\\ \newline
\fbox{$\begin{aligned}x_1&=1-t\\x_2&=2-t\\x_3&=t\end{aligned}\hspace{1cm}\vv{x}=\begin{bmatrix}1\\2\\0\end{bmatrix}+t\begin{bmatrix}-1\\-1\\1\end{bmatrix}$}}
\end{tblr} \\ \newline
Notice that the two solution sets differ only by a constant vector.
\end{example}
\begin{example}{\text{Example: Relating $A\vv{x}=\vv{b}$ and $A\vv{x}=\vv{0}$}}
Suppose $\textcolor{red}{A}$ is an $m\times n$ matrix. If $\textcolor{red}{A}\vv{x}=\vv{b}$ has one unique solution, what is the solution set for $\textcolor{red}{A}\vv{x}=\vv{0}$?
\begin{center}
\fbox{$\vv{x}=\vv{0}$}
\end{center}
\end{example}
\begin{definition}{Definition: Rank}
The \textbf{rank} of a matrix $A$, denoted $\text{rank}\(A\)$, is the number of nonzero rows in a REF of $A$.
\begin{example}{Example: What Is The Maximum Rank Of A Matrix? Example 1}
If $A$ is a $5\times7$ matrix, what is the maximum possible value of rank $A$? \fbox{5}
\end{example}
\end{definition}
\begin{example}{Example: What Is The Maximum Rank Of A Matrix? Example 2}
If $A$ is a $7\times5$ matrix, what is the maximum possible value of $\text{rank}\(A\)$?
$$\begin{bNiceMatrix}
1&0&0&0&0 \\
0&1&0&0&0 \\
0&0&1&0&0 \\
0&0&0&1&0 \\
0&0&0&0&1 \\
0&0&0&0&0 \\
0&0&0&0&0
\CodeAfter
\SubMatrix{\{}{1-1}{5-1}{.}[left-xshift=0.8mm]
\end{bNiceMatrix}$$
\fbox{$\text{rank}\(A\)=5$} as there is a max of 5 rows and columns such that there is a maximum of one pivot per row and column in REF.
\end{example}
\newpage
\section{Section 2.3 Span and Linear Independence}
\begin{example}{Does \text{$A\vv{x}=\vv{b}$} Have A Solution For All \text{$\vv{b}\in\mathbb{R}^3$}?}
Does $A\vv{x}=\vv{b}$ have a solution for every $\vv{b}\in\mathbb{R}^3$? \\ \newline
\begin{tblr}{width = \linewidth, colspec = {X|X}, cells = {halign = c, valign = m}}
{$A=\begin{bmatrix}1&0&1\\0&1&0\\1&0&2\end{bmatrix}\hspace{1cm}\vv{b}=\begin{bmatrix}b_1\\b_2\\b_3\end{bmatrix}$ \\ \vspace{2.5ex}
$\begin{aligned}
\[A\Big|\vv{b}\] = &\begin{bNiceMatrix}
1&0&1&\aug&b_1 \\
0&1&0&\aug&b_2 \\
1&0&2&\aug&b_3
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix} \\
\xrightarrow{R_3-R_1}&\underbrace{\begin{bNiceMatrix}
1&0&1&\aug&b_1 \\
0&1&0&\aug&b_2 \\
0&0&1&\aug&b_3-b_1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-3) circle (2mm) ;
\end{bNiceMatrix}}_{\text{REF}}
\end{aligned}$ \\ \vspace{2ex}
\parbox{\linewidth}{
\begin{flushleft}
One unique solution exists to $A\vv{x}=\vv{b}$ for every $\vv{b}\in\mathbb{R}^3$ (no leading entry/pivot in constants column and all variables are bound)
\end{flushleft}} \\ \newline
$\text{rank}\(A\)=3$} &
{$A=\begin{bmatrix}1&0&1\\0&1&0\\1&0&1\end{bmatrix}\hspace{1cm}\vv{b}=\begin{bmatrix}b_1\\b_2\\b_3\end{bmatrix}$ \\ \vspace{2.5ex}
$\begin{aligned}
\[A\Big|\vv{b}\] = &\begin{bNiceMatrix}
1&0&1&\aug&b_1 \\
0&1&0&\aug&b_2 \\
1&0&1&\aug&b_3
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix} \\
\xrightarrow{R_3-R_1}&\underbrace{\begin{bNiceMatrix}
1&0&1&\aug&b_1 \\
0&1&0&\aug&b_2 \\
0&0&0&\aug&b_3-b_1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix}}_{\text{REF}}
\end{aligned}$ \\ \vspace{2ex}
\parbox{\linewidth}{
\begin{flushleft}
2 Possibilities: If $b_3-b_1\ne0$, there is a leading entry in the constants column so \textbf{no solution} exists to $A\vv{x}=\vv{b}$. If $b_3-b_1=0$, infinitely many solutions exists ($x_3$ is a free variable) $A\vv{x}=\vv{b}$ is \textbf{not} always consistent.
\end{flushleft}} \\ \newline
$\text{rank}\(A\)=2$}
\end{tblr}
\end{example}
For an $\textcolor{red}{m}\times n$ matrix $A$, $A\vv{x}=\vv{b}$ is consistent (one or infinitely many solutions exists) for all $\vv{b}\in\mathbb{R}^m$ if and only if $\text{rank}A=\textcolor{red}{m}$. \\ \newline
If $A\vv{x}=\vv{b}$ is consistent for all $\vv{b}\in\mathbb{R}^m$, then every vector in $\mathbb{R}^m$ is a linear combination of the columns in $A$. In other words the columns of $A$ \textbf{span} $\mathbb{R}^m$.
\begin{definition}{Definition: Span}
The set of linear combinations of the vectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\in\mathbb{R}^n$ is the span of $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$, denoted $\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\)$.
$$\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\)=\left\{c_1\vv{v_1}+c_2\vv{v_2}+\hdots+c_k\vv{v_k}\big|c_1,c_2,\hdots,c_k\in\mathbb{R}\right\}$$
We say the vectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ span $\mathbb{R}^n$ or generate $\mathbb{R}^n$ if:
$$\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\)=\mathbb{R}^n$$
\begin{example}{Example: Comparing A Set And A span}
$$\text{span}\(\left. \begin{bmatrix}1\\2\end{bmatrix}\)=\left\{c\begin{bmatrix}1\\2\end{bmatrix}\right|c\in\mathbb{R}\right\}$$
\begin{center}
\begin{tikzpicture}
\begin{axis} [
width = 0.5\linewidth,
axis x line=middle,
axis y line=middle,
ymin = -5, ymax = 5,
xmin = -5, xmax = 5,
xlabel = {$x$},
ylabel = {$y$},
%xtick = {100},
%ytick = {100},
%xticklabels={},
every axis plot post/.append style={ultra thick},
clip = true,
axis line style = {Stealth-Stealth, thick},
legend style = {draw = none},
legend pos = outer north east,
title = {$\left\{\begin{bsmallmatrix}1\\2\end{bsmallmatrix}\right\}$ vs. $\text{span}\(\begin{bsmallmatrix}1\\2\end{bsmallmatrix}\)$}
]
\addplot [
black,
style = {Stealth-Stealth, dotted},
samples = 200,
domain = -2.5:2.5,
] {2*x} node[above right, pos = 0, rotate =63.434948822922010648427806279547] {$\text{span}\(\begin{bsmallmatrix}1\\2\end{bsmallmatrix}\)$};
\draw[-Stealth, ultra thick] (0,0) -- (1,2) node[right] {$\left\{\begin{bsmallmatrix}1\\2\end{bsmallmatrix}\right\}$};
\end{axis}
\end{tikzpicture}
\end{center}
\end{example}
\end{definition}
\begin{example}{Example: span That Equals \text{$\mathbb{R}^n$}}
$$\text{span}\(\left. \begin{bmatrix}1\\2\end{bmatrix},\begin{bmatrix}2\\-2\end{bmatrix}\)=\left\{c\begin{bmatrix}1\\2\end{bmatrix}+d\begin{bmatrix}2\\-2\end{bmatrix}\right|c,d\in\mathbb{R}\right\}$$
\begin{center}
\begin{tikzpicture}
\begin{axis} [
width = 0.5\linewidth,
axis x line=middle,
axis y line=middle,
ymin = -5, ymax = 5,
xmin = -5, xmax = 5,
xlabel = {$x$},
ylabel = {$y$},
%xtick = {100},
%ytick = {100},
%xticklabels={},
every axis plot post/.append style={ultra thick},
clip = true,
axis line style = {Stealth-Stealth, thick},
legend style = {draw = none},
legend pos = outer north east,
]
\addplot [
black,
style = {Stealth-Stealth, dotted},
samples = 200,
domain = -2.5:2.5,
] {2*x};
\draw[-Stealth, ultra thick] (0,0) -- (1,2);
\addplot [
black,
style = {Stealth-Stealth, dotted},
samples = 200,
domain = -5:5,
] {-x};
\draw[-Stealth, ultra thick] (0,0) -- (2,-2);
\addplot[mark=*] coordinates {(4,3)};
\addplot[mark=*] coordinates {(3.5,-2)};
\end{axis}
\end{tikzpicture} \\ \newline
$\text{span}\(\begin{bmatrix}1\\2\end{bmatrix},\begin{bmatrix}2\\-2\end{bmatrix}\)=\mathbb{R}^2$ thus $\begin{bmatrix}1\\2\end{bmatrix}$ and $\begin{bmatrix}2\\-2\end{bmatrix}$ span $\mathbb{R}^2$.
\end{center}
\end{example}
\begin{example}{Example: Algebraic span}
\begin{align*}
\text{span}\( \begin{bmatrix}1\\1\\0\\1\end{bmatrix},\begin{bmatrix}0\\1\\0\\0\end{bmatrix},\begin{bmatrix}1\\0\\1\\1\end{bmatrix}\)&=\left\{\left. c_1\begin{bmatrix}1\\1\\0\\1\end{bmatrix}+c_2\begin{bmatrix}0\\1\\0\\0\end{bmatrix}+c_3\begin{bmatrix}1\\0\\1\\1\end{bmatrix}\right| c_1,c_2,c_3\in\mathbb{R}\right\} \\
&=\left\{\left. \begin{bmatrix}\underline{c_1+c_3}\\c_1+c_2\\c_3\\\underline{c_1+c_3}\end{bmatrix}\right|c_1,c_2,c_3\in\mathbb{R}\right\}
\end{align*}
Every vector in the set has its 1\textsuperscript{st} and 4\textsuperscript{th} component equal to each other.
\end{example}
\begin{example}{Example: If A Vector Is In A span}
Is $\vv{b}=\begin{bmatrix}1\\0\\0\end{bmatrix}$ in $\text{span}\(\vv{v_1},\vv{v_2},\vv{v_3}\)$ where $\vv{v_1}=\begin{bmatrix}1\\1\\-4\end{bmatrix}$, $\vv{v_2}=\begin{bmatrix}0\\-1\\3\end{bmatrix}$, $\vv{v_3}=\begin{bmatrix}3\\-1\\0\end{bmatrix}$? \\ \newline
This is equivalent to asking the following questions:
\begin{itemize}
\item Is $\vv{b}\in\text{span}\(\vv{v_1},\vv{v_2},\vv{v_3}\)$? \textbf{No}
\item Is $\vv{b}$ a linear combination of $\vv{v_1},\vv{v_2}$ and $\vv{v_3}$? \textbf{No}
\item Does $x_1\vv{v_1}+x_2\vv{v_2}+x_3\vv{v_3}=\vv{b}$ have a solution? \textbf{No}
\item Is $\begin{bmatrix}\vv{v_1}&\vv{v_2}&\vv{v_3}\end{bmatrix}\vv{x}=\vv{b}$ consistent? \textbf{No}
\end{itemize}
$$\begin{bmatrix}1&0&3&\aug&1\\1&-1&-1&\aug&0\\-4&3&0&\aug&0\end{bmatrix}\xrightarrow[R_2-R_1]{R_3+4R_1}\begin{bmatrix}1&0&3&\aug&1\\0&-1&-4&\aug&-1\\0&3&12&\aug&4\end{bmatrix}\xrightarrow{R_3+3R_2}\begin{bNiceMatrix}
1&0&3&\aug&1\\
0&-1&-4&\aug&-1\\
0&0&0&\aug&1
\CodeAfter
\tikz \draw (3-5) circle (2mm) ;
\end{bNiceMatrix}$$
As $0\ne1$ then there is no solution such that $A\vv{x}=\vv{b}$ is consistent so $\vv{b}$ is \textbf{not} in $\text{span}\(\vv{v_1},\vv{v_2},\vv{v_3}\)$. This also means that all of the above questions are false.
\end{example}
Let $A$ be an $m\times n$ matrix with column vectors $\vv{a_1},\vv{a_2},\hdots,\vv{a_n}$. Let $\vv{b}\in\mathbb{R}^m$. The linear system $A\vv{x}=\vv{b}$ is consistent if and only if $\vv{b}\in\text{span}\(\vv{a_1},\vv{a_2},\hdots,\vv{a_n}\)$.
\begin{theorem}{Theorem: Relating Consistence And span}
Let $A$ be an $m\times n$ matrix with column vectors $\vv{a_1},\vv{a_2},\hdots,\vv{a_n}$. Then the following statements are equivalent (either all are True or all are False):
\begin{enumerate}
\item $\text{rank}A=m$
\item $A\vv{x}=\vv{b}$ is consistent for all $\vv{b}\in\mathbb{R}^m$
\item $\text{span}\(\vv{a_1},\vv{a_2},\hdots,\vv{a_n}\)=\mathbb{R}^m$ (the column vectors of $A$ span $\mathbb{R}^m$) ($\vv{a_1},\vv{a_2},\hdots,\vv{a_n}$ span $\mathbb{R}^m$)
\end{enumerate}
\end{theorem}
\begin{example}{Example: Finding Rank To See If A span Spans $\mathbb{R}^m$}
Does $\text{span}\(\begin{bmatrix}1\\2\\3\end{bmatrix},\begin{bmatrix}-1\\-1\\-2\end{bmatrix},\begin{bmatrix}2\\-1\\1\end{bmatrix},\begin{bmatrix}3\\4\\8\end{bmatrix}\)=\mathbb{R}^3$?
$$\begin{bmatrix}1&-1&2&3\\2&-1&-1&4\\3&-2&1&8\end{bmatrix}\xrightarrow[R_3-3R_1]{R_2-2R_1}\begin{bmatrix}1&-1&2&3\\0&1&-5&-2\\0&1&-5&-1\end{bmatrix}\xrightarrow{R_3-R_2}\begin{bmatrix}1&-1&2&3\\0&1&-5&-2\\0&0&0&1\end{bmatrix}$$
$\text{rank}A=3=\text{number of rows in }A$ thus $\text{span}\(\begin{bmatrix}1\\2\\3\end{bmatrix},\begin{bmatrix}-1\\-1\\-2\end{bmatrix},\begin{bmatrix}2\\-1\\1\end{bmatrix},\begin{bmatrix}3\\4\\8\end{bmatrix}\)=\mathbb{R}^3$.
\end{example}
\begin{example}{Example: Do The Columns span $\mathbb{R}^m$?}
Do the columns of $A=\begin{bmatrix}1&2&0\\-1&0&2\\0&1&1\end{bmatrix}$ span $\mathbb{R}^3$?
$$A=\begin{bmatrix}1&2&0\\-1&0&2\\0&1&1\end{bmatrix}\xrightarrow{R_2+R_1}\begin{bmatrix}1&2&0\\0&2&2\\0&1&1\end{bmatrix}\xrightarrow{R_3-\frac{1}{2}R_2}\begin{bmatrix}1&2&0\\0&2&2\\0&0&0\end{bmatrix}$$
$\text{rank}A=2\ne\text{number of rows in }A$ so the columns of $A$ do \textbf{not} span $\mathbb{R}^3$.
\end{example}
\begin{example}{Example: If More Vectors Are Added To A Span Do Past Elements Change?}
Suppose $\vv{x}\in\text{span}\(\vv{u},\vv{v}\)$. Is $\vv{x}\in\text{span}\(\vv{u},\vv{v},\vv{w}\)$? \\ \newline
\textbf{Yes}, as since $\vv{x}\in\text{span}\(\vv{u},\vv{v}\)$ then $\vv{x}=c_1\vv{u}+c_2\vv{v}$ (for some $c_1,c_2\in\mathbb{R}$) which also means that $\vv{x}=c_1\vv{u}+c_2\vv{v}+0\vv{w}$ so $\vv{x}\in\text{span}\(\vv{u},\vv{v},\vv{w}\)$ as it can be written as a linear combination of $\vv{u},\vv{v}$ and $\vv{w}$.
\end{example}
\begin{example}{Example: Do Spans Change When Elements Are Linear Combinations?}
Suppose $\vv{w}=3\vv{u}-2\vv{v}$ and $\vv{x}\in\text{span}\(\vv{u},\vv{v},\vv{w}\)$. Is $\vv{x}\in\text{span}\(\vv{u},\vv{v}\)$?
\begin{align*}
\vv{x}&=c_1\vv{u}+c_2\vv{v}+c_3\vv{w} \\
&=c_1\vv{u}+c_2\vv{v}+c_3\(3\vv{u}-2\vv{v}\) \\
&=c_1\vv{u}+c_2\vv{v}+3c_3\vv{u}-2c_3\vv{v} \\
&=\(c_1+3c_3\)\vv{u}+\(c_2-2c_3\)\vv{v}
\end{align*}
\textbf{Yes}, as you can write $\vv{x}$ as a linear combination of $\vv{u}$ and $\vv{v}$. This also means that
$$\text{span}\(\vv{u},\vv{v},\vv{w}\)=\text{span}\(\vv{u},\vv{v}\)$$
\end{example}
\begin{theorem}{Theorem: A Span Can Contain Linear Combinations}
Let $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\in\mathbb{R}^n$ if $\vv{v_i}\in\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v}_{i-1},\vv{v}_{i+1},\hdots,\vv{v_k}\)$ then
$$\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\)=\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v}_{i-1},\vv{v}_{i+1},\hdots,\vv{v_k}\)$$
\begin{example}{Example: Span Having Linear Combination}
If $\vv{b}\in\text{span}\(\vv{u},\vv{v},\vv{w}\)$ then $$\text{span}\(\vv{u},\vv{v},\vv{w},\vv{b}\)=\text{span}\(\vv{u},\vv{v},\vv{w}\)$$
\end{example}
\end{theorem}
\begin{example}{Example: Span And Homogeneous Linear Systems}
Suppose $\vv{b}\in\text{span}\(\vv{u},\vv{v}\)$. How many solutions exist to the homogeneous linear system \\ $x_1\vv{u}+x_2\vv{v}+x_3\vv{b}=\vv{0}$?
$$\begin{bmatrix}\vv{u}&\vv{v}&\vv{b}\end{bmatrix}\begin{bmatrix}x_1\\x_2\\x_3\end{bmatrix}=\vv{0}$$
One solution is $\underbrace{x_1=0,x_2=0,x_3=0}_{\text{Trivial Solution}}$: $0\vv{u}+0\vv{v}+0\vv{b}=\vv{0}\checkmark$
\begin{align*}
\vv{b}\in\text{span}\(\vv{u},\vv{v}\)&\rightarrow\vv{b}\text{ is a linear combination of }\vv{u}\text{ and }\vv{v} \\
&\rightarrow\vv{b}=c\vv{u}+d\vv{v}\text{ for some }c,d\in\mathbb{R} \\
&\rightarrow\vv{0}=c\vv{u}+d\vv{v}-\vv{b} \\
&\rightarrow \fbox{$\begin{aligned}x_1&=c\\x_2&=d\\x_3&=-1\end{aligned}$} \text{ is also a solution}
\end{align*}
As there are at least 2 solutions to the homogeneous linear system then there are \textbf{Infinitely Many Solutions} to $x_1\vv{u}+x_2\vv{v}+x_3\vv{b}=\vv{0}$.
\end{example}
\begin{definition}{Definition: Linearly Independent And Linearly Dependent}
The vectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\in\mathbb{R}^n$ are \textbf{linearly independent} if the homogeneous linear system $x_1\vv{v_1}+x_2\vv{v_2}+\hdots+x_k\vv{v_k}=\vv{0}$ has only the trivial solution $x_1=x_2=\hdots=x_k=0$. If infinitely many solutions exist to $x_1\vv{v_1}+x_2\vv{v_2}+\hdots+x_k\vv{v_k}=\vv{0}$ then the vectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ are \textbf{linearly dependent}.
\end{definition}
Notes:
\begin{itemize}
\item In previous example, $\vv{u},\vv{v}$ and $\vv{b}$ are linearly dependent vectors.
\item {We use linearly independent/linearly dependent to describe a set of vectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ or to describe the columns of $A$:
\begin{itemize}
\item If \underline{$A\vv{x}=\vv{0}$} has only the trivial solution $\vv{x}=\vv{0}$, the columns of $A$ are linearly independent, If \underline{$A\vv{x}=\vv{0}$} has infinitely many solutions, the columns of $A$ are linearly dependent.
\end{itemize}}
\end{itemize}
\begin{example}{Example: Are Vectors Linearly Independent Or Dependent?}
Determine if the following vectors are linearly independent or linearly dependent: \\ \newline
\begin{tblr}{width = \linewidth, colspec = {X|X}, cells = {halign = c, valign = m}}
{$\begin{bmatrix}1\\0\\1\end{bmatrix},\begin{bmatrix}0\\1\\0\end{bmatrix},\begin{bmatrix}1\\0\\-1\end{bmatrix}$ \\ \vspace{2.5ex}
Solve $x_1\begin{bmatrix}1\\0\\1\end{bmatrix}+x_2\begin{bmatrix}0\\1\\0\end{bmatrix}+x_3\begin{bmatrix}1\\0\\-1\end{bmatrix}=\begin{bmatrix}0\\0\\0\end{bmatrix}$ \\ \vspace{2.5ex}
$\begin{aligned}
\begin{bNiceMatrix}
1&0&1&\aug&0\\
0&1&0&\aug&0\\
1&0&-1&\aug&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}\xrightarrow{R_3-R_1}&\begin{bmatrix}1&0&1&\aug&0\\0&1&0&\aug&0\\0&0&-2&\aug&0\end{bmatrix} \\
\xrightarrow{-\frac{1}{2}R_3}&\begin{bmatrix}1&0&1&\aug&0\\0&1&0&\aug&0\\0&0&1&\aug&0\end{bmatrix} \\
\xrightarrow{R_1-R_3}&\begin{bmatrix}1&0&0&\aug&0\\0&1&0&\aug&0\\0&0&1&\aug&0\end{bmatrix}
\end{aligned}$ \\ \newline
\parbox{\linewidth}{\begin{flushleft}
As all variables are bound then there is only one solution that exists so the vectors are linearly independent.\end{flushleft}} \\ \newline
\fbox{$\begin{aligned}x_1&=0\\x_2&=0\\x_3&=0\end{aligned}$}} &
{$\begin{bmatrix}1\\2\\1\end{bmatrix},\begin{bmatrix}1\\1\\1\end{bmatrix},\begin{bmatrix}1\\0\\1\end{bmatrix}$ \\ \vspace{2.5ex}
Solve $x_1\begin{bmatrix}1\\2\\1\end{bmatrix}+x_2\begin{bmatrix}1\\1\\1\end{bmatrix}+x_3\begin{bmatrix}1\\0\\1\end{bmatrix}=\begin{bmatrix}0\\0\\0\end{bmatrix}$ \\ \vspace{2.5ex}
$\begin{aligned}
\begin{bNiceMatrix}
1&1&1&\aug&0\\
2&1&0&\aug&0\\
1&1&1&\aug&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}\xrightarrow{R_2-2R_1}&\begin{bmatrix}1&1&1&\aug&0\\0&-1&-2&\aug&0\\1&1&1&\aug&0\end{bmatrix} \\
\xrightarrow{R_3-R_1}&\begin{bmatrix}1&1&1&\aug&0\\0&-1&-2&\aug&0\\0&0&0&\aug&0\end{bmatrix} \\
\end{aligned}$ \\ \newline
\parbox{\linewidth}{\begin{flushleft}
There are infinitely many solutions as the vectors are linearly dependent as $x_3$ is a free variable ($x_1,x_2$ are bound).\end{flushleft}} \\ \newline
$\begin{aligned}
x_1-x_3&=0 \\
x_2+2x_3&=0 \\
0&=0
\end{aligned}\hspace{1cm}$\fbox{$\begin{aligned}x_1&=t\\x_2&=-2t\\x_3&=t\end{aligned}$}}
\end{tblr}
\end{example}
\begin{example}{Example: Are The Columns Of A Matrix Linearly Independent?}
Are the columns of $A=\begin{bmatrix}1&3&5\\2&4&6\end{bmatrix}$ linearly independent?
$$\text{Solve }A\vv{x}=\vv{0}\text{: }\begin{bmatrix}1&3&5&\aug&0\\2&4&6&\aug&0\end{bmatrix}\xrightarrow{R_2-2R_1}\begin{bNiceMatrix}[first-row]
&&\downarrow&&\\
1&3&5&\aug&0\\
0&-2&-4&\aug&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (3mm) ;
\end{bNiceMatrix}$$
A free variable exists in the system $A\vv{x}=\vv{0}$ so $A\vv{x}=\vv{0}$ has infinitely many solutions, thus the columns of $A$ are linearly dependent.
\end{example}
\begin{theorem}{Theorem: Linear Combinations In Sets And Linearly Dependent}
The vectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\in\mathbb{R}^n$ are linearly dependent if and only if one of the vectors in the set is a linear combination of the other vectors in the set.
\end{theorem}
\begin{proof}
If one of the vectors, say $\vv{v_1}$, is a linear combination of the others, then there are scalars $c_2,\hdots,c_m$ such that $\vv{v_1}=c_2\vv{v_2}+\hdots+c_m\vv{v_m}$. Rearranging, we obtain $\vv{v_1}-c_2\vv{v_2}-\hdots-c_m\vv{v_m}=\vv{0}$, which implies that $\vv{v_1},\vv{v_2},\hdots,\vv{v_m}$ are linearly dependent, since at least one of the scalars (namely, the coefficient $1$ of $\vv{v_1}$) is nonzero. \\ \newline
Conversely, suppose that $\vv{v_1},\vv{v_2},\hdots,\vv{v_m}$ are linearly dependent. Then there are scalars $c_1,c_2,\hdots,c_m$, not all zero, such that $c_1\vv{v_1}+c_2\vv{v_2}+\hdots+c_m\vv{v_m}=\vv{0}$. Suppose $c_1\ne0$. Then
$$c_1\vv{v_1}=-c_2\vv{v_2}-\hdots-c_m\vv{v_m}$$
and we may multiply both sides by $\dfrac{1}{c_1}$ to obtain $\vv{v_1}$ as a linear combination of the other vectors:
$$\vv{v_1}=-\(\frac{c_2}{c_1}\)\vv{v_2}-\hdots-\(\frac{c_m}{c_1}\)\vv{v_m}$$
\end{proof}
\begin{example}{Example: Linear Combination In Set Of Vectors}
$\begin{bmatrix}1\\2\\1\end{bmatrix},\begin{bmatrix}1\\1\\1\end{bmatrix},\begin{bmatrix}1\\0\\1\end{bmatrix}$ are linearly dependent.
$$x_1\begin{bmatrix}1\\2\\1\end{bmatrix}+x_2\begin{bmatrix}1\\1\\1\end{bmatrix}+x_3\begin{bmatrix}1\\0\\1\end{bmatrix}=\begin{bmatrix}0\\0\\0\end{bmatrix}$$
Which has the solution set of $\begin{aligned}x_1&=t\\x_2&=-2t\\x_3&=t\end{aligned}\text{ }\(t\in\mathbb{R}\)$. If $t=2$ then $\begin{aligned}x_1&=2\\x_2&=-4\\x_3&=2\end{aligned}$ thus:
\begin{align*}
2\begin{bmatrix}1\\2\\1\end{bmatrix}-4\begin{bmatrix}1\\1\\1\end{bmatrix}+2\begin{bmatrix}1\\0\\1\end{bmatrix}&=\begin{bmatrix}0\\0\\0\end{bmatrix} \\
4\begin{bmatrix}1\\1\\1\end{bmatrix}-2\begin{bmatrix}1\\0\\1\end{bmatrix}&=2\begin{bmatrix}1\\2\\1\end{bmatrix}\\
2\begin{bmatrix}1\\1\\1\end{bmatrix}-1\begin{bmatrix}1\\0\\1\end{bmatrix}&=\begin{bmatrix}1\\2\\1\end{bmatrix}
\end{align*}
Making $\begin{bmatrix}1\\2\\1\end{bmatrix}$ a linear combination of $\begin{bmatrix}1\\1\\1\end{bmatrix}$ and $\begin{bmatrix}1\\0\\1\end{bmatrix}$.
\end{example}
\begin{theorem}{Theorem: Relating rank\text{,} \text{$A\vv{x}=\vv{0}$}\text{,} And Linearly Independent Columns}
Let $A$ be an $m\times n$ matrix with column vectors $\vv{a_1},\vv{a_2},\hdots,\vv{a_n}$. The following are equivalent (either all are True or all are False):
\begin{enumerate}
\item $\text{rank}A=n$
\item $A\vv{x}=\vv{0}$ has only the trivial solution $\vv{x}=\vv{0}$.
\item The columns of $A$ are linearly independent ($\vv{a_1},\vv{a_2},\hdots,\vv{a_n}$ are linearly independent)
\end{enumerate}
\end{theorem}
\begin{example}{Example: If \text{$\text{rank}=n$} Then Linearly Independent}
$$A=\begin{bmatrix}1&1&-1\\0&-2&4\\2&0&2\\2&4&-5\end{bmatrix}\xrightarrow{\text{Row Equivalent}}\begin{bNiceMatrix}
1&0&0\\
0&1&0\\
0&0&1 \\
0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-3) circle (2mm) ;
\end{bNiceMatrix}$$
$\text{rank}A=3=\text{number of columns in } A$, thus the columns of $A$ are linearly independent.
\end{example}
\begin{example}{Example: Linearly Dependent Or Linearly Independent Based On Size}
If $B$ is $3\times5$, are the columns of $B$ linearly independent or linearly dependent? \\ \newline
$\text{rank}B$ is at most 3 so $\text{rank}B\ne5$ thus the columns of $B$ are linearly dependent.
\end{example}
Note:
\begin{itemize}
\item Any set of vectors from $\mathbb{R}^n$ containing \textbf{more than} $n$ vectors must be linearly dependent.
\begin{proof}
Let $\vv{v_1},\vv{v_2},\hdots,\vv{v_m}$ be (column) vectors in $\mathbb{R}^n$ and let $A$ be the $n\times m$ matrix $\begin{bmatrix}\vv{v_1}&\vv{v_2}&\hdots&\vv{v_m}\end{bmatrix}$ with these vectors as its columns. Then $\vv{v_1},\vv{v_2},\hdots,\vv{v_m}$ are linearly dependent if and only if the homogeneous linear system with augmented matrix $\begin{bmatrix}A&\aug&\vv{0}\end{bmatrix}$ has a nontrivial solution. But this will always be the case if $A$ has more columns than rows; it is the case here, since number of columns $m$ is greater than number of rows $n$.
\end{proof}
\end{itemize}
\begin{example}{Example: Is A Set Of Vectors Containing \text{$\vv{0}$} Linearly Independent?}
Let $\vv{u},\vv{v},\vv{0}\in\mathbb{R}^n$. Are $\vv{u},\vv{v},\vv{0}$ linearly independent? \\ \newline
This can be rephrased as: how many solutions exist to
$$x_1\vv{u}+x_2\vv{v}+x_3\vv{0}=\vv{0}$$
Of which there is the trivial solution of $\begin{aligned}x_1&=0\\x_2&=0\\x_3&=0\end{aligned}$ and the non trivial solution of $\begin{aligned}x_1&=0\\x_2&=0\\x_3&=2\end{aligned}$. As there are two possible solutions then there are infinitely many solutions so the set of vectors is linearly dependent.
\end{example}
Note:
\begin{itemize}
\item If $\vv{0}$ is in a set of vectors, the set must be linearly dependent.
\end{itemize}
The \textbf{rank} of a matrix is equal to the number of linearly independent columns in the matrix.
\begin{example}{Example: How Many Linearly independent Columns?}
$$A=\begin{bNiceMatrix}[first-row]
\downarrow&\downarrow&&&\downarrow\\
1&0&5&0&2\\
0&1&3&4&1\\
-1&1&-2&4&1\\
0&0&0&0&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-5) circle (2mm) ;
\end{bNiceMatrix}\longrightarrow\begin{bNiceMatrix}[first-row]
\downarrow&\downarrow&&&\downarrow\\
1&0&5&0&0\\
0&1&3&4&0\\
0&0&0&0&1\\
0&0&0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-5) circle (2mm) ;
\end{bNiceMatrix}=U$$
$\text{rank}A=3\ne5$ so $\vv{a_1},\vv{a_2},\vv{a_3},\vv{a_4},\vv{a_5}$ are linearly dependent, however $\vv{a_1},\vv{a_2},\vv{a_5}$ are linearly independent as they have a leading coefficient, and aren't linear combinations of other columns.
\begin{tblr}{width = \linewidth,colspec = {X|X}, cells = {mode = dmath, valign = m, halign = c}}
{\begin{aligned}5\begin{bmatrix}1\\0\\0\\0\end{bmatrix}+3\begin{bmatrix}0\\1\\0\\0\end{bmatrix}&=\begin{bmatrix}5\\3\\0\\0\end{bmatrix} \\
5\begin{bmatrix}1\\0\\-1\\0\end{bmatrix}+3\begin{bmatrix}0\\1\\1\\0\end{bmatrix}&=\begin{bmatrix}5\\3\\-2\\0\end{bmatrix}\end{aligned}} &
{\begin{aligned}4\begin{bmatrix}0\\1\\0\\0\end{bmatrix}&=\begin{bmatrix}0\\4\\0\\0\end{bmatrix} \\
4\begin{bmatrix}0\\1\\1\\0\end{bmatrix}&=\begin{bmatrix}0\\4\\4\\0\end{bmatrix}\end{aligned}}
\end{tblr}
$$\text{span}\(\vv{a_1},\vv{a_2},\vv{a_3},\vv{a_4},\vv{a_5}\)=\text{span}\(\vv{a_1},\vv{a_2},\vv{a_5}\)$$
We need $n$ linearly independent vectors from $\mathbb{R}^n$ to span $\mathbb{R}^n$. \\ \newline
Also note that although row equivalent matrices share a lot of properties they aren't equal, $\vv{a_1}\ne\vv{u_1}$.
\end{example}
Notes:
\begin{itemize}
\item Any 2 linearly independent vectors from $\mathbb{R}^2$ spans $\mathbb{R}^2$.
\item Any 3 linearly independent vectors from $\mathbb{R}^3$ spans $\mathbb{R}^3$.
\item In general, any $n$ linearly independent vectors from $\mathbb{R}^n$ spans $\mathbb{R}^n$.
\end{itemize}
\begin{example}{Example: \text{$\vv{a_1}\ne\vv{u_1}$}}
Suppose $\begin{bmatrix}\vv{v_1}&\vv{v_2}&\vv{v_3}&\vv{v_4}&\vv{v_5}\end{bmatrix}=\begin{bmatrix}1&2&0&2&0\\2&1&3&2&4\\1&2&0&2&0\\0&2&-2&0&0\end{bmatrix}$ has RREF \\$\begin{bmatrix}\vv{u_1}&\vv{u_2}&\vv{u_3}&\vv{u_4}&\vv{u_5}\end{bmatrix}=\begin{bNiceMatrix}
1&0&2&0&4\\
0&1&-1&0&0\\
0&0&0&1&-2\\
0&0&0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-4) circle (2mm) ;
\end{bNiceMatrix}$.
\begin{center}$\vv{v_1},\vv{v_2},\vv{v_4}$ are linearly independent, as each has a leading coefficient\end{center}
\begin{align*}
2\vv{v_1}-\vv{v_2}&=\vv{v_3} & 2\vv{u_1}-\vv{u_2}&=\vv{u_3} \\
4\vv{v_1}-2\vv{v_4}&=\vv{v_5} & 4\vv{u_1}-2\vv{u_4}&=\vv{u_5}
\end{align*}
\end{example}
Notes:
\begin{itemize}
\item In general, $\vv{v_i}\ne\vv{u_i}$ in row equivalent matrices, but relationships between column vectors are preserved.
\end{itemize}
\newpage
\section{Section 3.3 Inverses of Matrices}
\begin{definition}{Definition: Invertible}
An $n\times n$ matrix $A$ is \textbf{invertible} if there is an $n\times n$ matrix $B$ such that $AB=I_n$ and $BA=I_n$ (where $I_n$ is the $n\times n$ identity matrix). We call $B$ the \textbf{inverse} of $A$, denoted $A^{-1}$.
\begin{example}{Example: Invertible Matrix}
Verify $\displaystyle{\frac{1}{16}\begin{bmatrix}2&-8\\1&4\end{bmatrix}}$ is the inverse of $\begin{bmatrix}4&8\\-1&2\end{bmatrix}$.
$$\frac{1}{16}\begin{bmatrix}2&-8\\1&4\end{bmatrix}\begin{bmatrix}4&8\\-1&2\end{bmatrix}=\frac{1}{16}\begin{bmatrix}16&0\\0&16\end{bmatrix}=\begin{bmatrix}1&0\\0&1\end{bmatrix}=I_2$$
$$\begin{bmatrix}4&8\\-1&2\end{bmatrix}\(\frac{1}{16}\begin{bmatrix}2&-8\\1&4\end{bmatrix}\)=I_2$$
\end{example}
\end{definition}
Notes:
\begin{itemize}
\item If $A$ is invertible then $A^{-1}$ is unique.
\begin{proof}
In mathematics, a standard way to show that there is just one of something is to show that there cannot be more than one. So, suppose that $A$ has two inverses, say $A'$ and $A''$. Then
$$AA'=I=A'A\qquad\text{and}\qquad AA''=I=A''A$$
Thus,
$$A'=A'I=A'\(AA''\)=\(A'A\)A''=IA''=A''$$
Hence, $A'=A''$, and the inverse is unique.
\end{proof}
\item {Not all square matrices are invertible. $n\times n$ matrices can be grouped into two categories:
\begin{itemize}
\item Invertible/nonsingular
\item Noninvertible/singular
\end{itemize}}
\end{itemize}
\begin{theorem}{Theorem: You Don't Need \text{$AC=I_n$ and $CA=I_n$}}
Let $A$ be an $n\times n$ matrix. If there is a matrix $C$ such that $C$ is $n\times n$ and $AC=I_n$ or $CA=I_n$ then $C$ is the inverse of $A$. \\ \newline
Notes:
\begin{itemize}
\item If $C=A^{-1}$, then $A=C^{-1}$.
\end{itemize}
\end{theorem}
If $\begin{bmatrix}a\end{bmatrix}\ne\begin{bmatrix}0\end{bmatrix}$, then $\displaystyle{\begin{bmatrix}a\end{bmatrix}^{-1}=\begin{bmatrix}\frac{1}{a}\end{bmatrix}}$ \\ \newline
If $\begin{bmatrix}a\end{bmatrix}=\begin{bmatrix}0\end{bmatrix}$, $\begin{bmatrix}a\end{bmatrix}$ is not invertible.
\newpage
\subsection{Inverses of $2\times2$ Matrices}
\begin{theorem}{Theorem: Finding The Inverse Of A \text{$2\times2$} Matrix}
Let $A=\begin{bmatrix}a&b\\c&d\end{bmatrix}$ then:
\begin{itemize}
\item If $ad-bc=0$, then $A$ is not invertible.
\item {If $ad-bc\ne0$, then $A$ is invertible and is:
$$A^{-1}=\frac{1}{ad-bc}\begin{bmatrix}d&-b\\-c&a\end{bmatrix}$$}
\end{itemize}
\end{theorem}
\begin{proof}
Suppose that $\text{det}A=ad-bc\ne0$. Then
$$\begin{bmatrix}a&b\\c&d\end{bmatrix}\begin{bmatrix}d&-b\\-c&a\end{bmatrix}=\begin{bmatrix}ad-bc&-ab+ba\\cd-dc&-cb+da\end{bmatrix}=\begin{bmatrix}ad-bc&0\\0&ad-bc\end{bmatrix}=\text{det}A\begin{bmatrix}1&0\\0&1\end{bmatrix}$$
Similarly,
$$\begin{bmatrix}d&-b\\-c&a\end{bmatrix}\begin{bmatrix}a&b\\c&d\end{bmatrix}=\text{det}A\begin{bmatrix}1&0\\0&1\end{bmatrix}$$
Since $\text{det}A\ne0$, we can multiply both sides of each equation by $\dfrac{1}{\text{det}A}$ to obtain
$$\begin{bmatrix}a&b\\c&d\end{bmatrix}\(\frac{1}{\text{det}A}\begin{bmatrix}d&-b\\-c&a\end{bmatrix}\)=\begin{bmatrix}1&0\\0&1\end{bmatrix}\qquad\text{and}\qquad\(\frac{1}{\text{det}A}\begin{bmatrix}d&-b\\-c&a\end{bmatrix}\)\begin{bmatrix}a&b\\c&d\end{bmatrix}=\begin{bmatrix}1&0\\0&1\end{bmatrix}$$
Then the matrix $\dfrac{1}{\text{det}A}\begin{bmatrix}d&-b\\-c&a\end{bmatrix}$ satisfies the definition of an inverse, so $A$ is invertible. Since the inverse of $A$ is unique we must have
$$A^{-1}=\frac{1}{\text{det}A}\begin{bmatrix}d&-b\\-c&a\end{bmatrix}$$
Conversely, assume that $ad-bc=0$. We will consider separately the cases where $a\ne0$ and where $a=0$. If $a\ne0$, then $d=\dfrac{bc}{a}$, so the matrix can be written as
$$A=\begin{bmatrix}a&b\\c&d\end{bmatrix}=\begin{bmatrix}a&b\\ac/a&bc/a\end{bmatrix}=\begin{bmatrix}a&b\\ka&kb\end{bmatrix}$$
where $k=\dfrac{c}{a}$. In other words, the second row of $A$ is a multiple of the first. If $A$ has an inverse $\begin{bmatrix}w&x\\y&z\end{bmatrix}$, then
$$\begin{bmatrix}a&b\\ka&kb\end{bmatrix}\begin{bmatrix}w&x\\y&z\end{bmatrix}=\begin{bmatrix}1&0\\0&1\end{bmatrix}$$
and the corresponding system of linear equations
\begin{align*}
aw+by&=1 \\
ax+bz&=0 \\
kaw+kby&=0 \\
kax+kbz&=1
\end{align*}
has no solution. \\
If $a=0$, then $ad-bc=0$ implies that $bc=0$, and therefore either $b$ or $c$ is $0$. Thus, $A$ is of the form
$$\begin{bmatrix}0&0\\c&d\end{bmatrix}\qquad\text{or}\qquad\begin{bmatrix}0&b\\0&d\end{bmatrix}$$
In the first case, $\begin{bmatrix}0&0\\c&d\end{bmatrix}\begin{bmatrix}w&x\\y&z\end{bmatrix}=\begin{bmatrix}0&0\\ *&*\end{bmatrix}\ne\begin{bmatrix}1&0\\0&1\end{bmatrix}$, so it has no inverse. Similarly, $\begin{bmatrix}0&b\\0&d\end{bmatrix}$ cannot have an inverse. \\
Consequently, if $ad-bc=0$, then $A$ is not invertible.
\end{proof}
\begin{definition}{Definition: Determinant}
Let $A=\begin{bmatrix}a&b\\c&d\end{bmatrix}$. The \textbf{determinant} of $A$, denoted $\text{det}A$, is:
$$\text{det}A=ad-bc=\abs{\begin{matrix}a&b\\c&d\end{matrix}}$$
\end{definition}
\begin{example}{Example: Finding The Inverse Of A \text{$2\times2$} Matrix}
Find $A^{-1}$ or determine $A$ is not invertible: \\ \newline
\begin{tblr}{width = \linewidth, colspec={Q|Q|X}, row{1} = {valign=m,halign=c}, row{2} = {valign=t,halign=l}, row{3} = {valign=m,halign=c}}
{$A=\begin{bmatrix}1&-1\\2&1\end{bmatrix}$ \\ \vspace{2.5ex}
$\begin{aligned}\text{det}A&=1\(1\)-\(-1\)\(2\)\\ &=3\end{aligned}$ \\ \vspace{2.5ex}
$\begin{aligned}
A^{-1}&=\frac{1}{1\(1\)-\(-1\)\(2\)}\begin{bmatrix}1&1\\-2&1\end{bmatrix} \\
&=\frac{1}{3}\begin{bmatrix}1&1\\-2&1\end{bmatrix}
\end{aligned}$\vspace{2.5ex}} &
{$A=\begin{bmatrix}-1&1\\1&-1\end{bmatrix}$ \\ \vspace{2.5ex}
$\begin{aligned}\text{det}A&=\(-1\)\(-1\)-\(1\)\(1\)\\ &=0\end{aligned}$ \\ \vspace{2.5ex}
$A$ is Not Invertible} &
{$A=\begin{bmatrix}x&kx\\y&ky\end{bmatrix}$ \\ \vspace{2.5ex}
$\begin{aligned}\text{det}A&=kxy-kxy\\ &=0\end{aligned}$\\ \vspace{2.5ex}
$A$ is Not Invertible} \\
{Verify: $A^{-1}A$\vspace{1ex}} & {} \\
{$\displaystyle{\frac{1}{3}\begin{bmatrix}1&1\\-2&1\end{bmatrix}\begin{bmatrix}1&-1\\2&1\end{bmatrix}=\frac{1}{3}\begin{bmatrix}3&0\\0&3\end{bmatrix}=I_2\checkmark}$}
\end{tblr} \\ \newline
\textbf{Conclusion:} If $A$ is a $n\times n$ matrix with linearly dependent columns, then $A$ is not invertible.
\end{example}
\begin{theorem}{Theorem: If A Matrix Is Invertible Then It Is Linearly Independent}
If $A$ is an invertible $n\times n$ matrix, then for each $\vv{b}\in\mathbb{R}^n$, the linear system $A\vv{x}=\vv{b}$ has the unique solution $\vv{x}=A^{-1}\vv{b}$.
\begin{align*}
A\vv{x}&=\vv{b} & A\(A^{-1}\vv{b}\)&=\(AA^{-1}\)\vv{b} \\
A^{-1}A\vv{x}&=A^{-1}\vv{b} & &=I_n\vv{b} \\
I_n\vv{x}&=A^{-1}\vv{b} & &=\vv{b}\checkmark \\
\vv{x}&=A^{-1}\vv{b}
\end{align*}
\end{theorem}
\begin{proof}
We are asked to prove two things: that $A\vv{x}=\vv{b}$ has a solution and that it has only one solution. (In mathematics, such a proof is called an “existence and uniqueness” proof.) \\ \newline
To show that a solution exists, we need only verify that $\vv{x}=A^{-1}\vv{b}$ works. We check that
$$A\(A^{-1}\vv{b}\)=\(AA^{-1}\)\vv{b}=I\vv{b}=\vv{b}$$
So $A^{-1}\vv{b}$ satisfies the equation $A\vv{x}=\vv{b}$, and hence there is at least this solution. \\ \newline
To show that this solution is unique, suppose $\vv{y}$ is another solution. Then $A\vv{y}=\vv{b}$, and multiplying both sides of the equation by $A^{-1}$ on the left, we obtain the chain of implications
\begin{align*}
A\vv{y}&=\vv{b} \\
A^{-1}\(A\vv{y}\)&=A^{-1}\vv{b} \\
\(A^{-1}A\)\vv{y}&=A^{-1}\vv{b} \\
I\vv{y}&=A^{-1}\vv{b} \\
\vv{y}&=A^{-1}\vv{b}
\end{align*}
Thus, $\vv{y}$ is the same solution as before, and therefore the solution is unique.
\end{proof}
\begin{example}{Example: Using The Inverse To Solve A Linear System}
Find $\begin{bmatrix}5&3\\3&2\end{bmatrix}^{-1}$ then use the inverse to solve. \\ \newline
Find the determinant:
$$\text{det}\begin{bmatrix}5&3\\3&2\end{bmatrix}=5\(2\)-3\(3\)=1$$
Find Inverse:
$$\begin{bmatrix}5&3\\3&2\end{bmatrix}^{-1}=\frac{1}{1}\begin{bmatrix}2&-3\\-3&5\end{bmatrix}$$
Solve the Linear Systems:
\begin{align*}
\begin{bmatrix}5&3\\3&2\end{bmatrix}\vv{x}&=\begin{bmatrix}1\\2\end{bmatrix} & \begin{bmatrix}5&3\\3&2\end{bmatrix}\vv{x}&=\begin{bmatrix}0\\0\end{bmatrix} \\
\vv{x}&=\begin{bmatrix}2&-3\\-3&5\end{bmatrix}\begin{bmatrix}1\\2\end{bmatrix} & \vv{x}&=\begin{bmatrix}2&-3\\-3&5\end{bmatrix}\begin{bmatrix}0\\0\end{bmatrix} \\
\vv{x}&=\begin{bmatrix}-4\\7\end{bmatrix} & \vv{x}&=\begin{bmatrix}0\\0\end{bmatrix}
\end{align*}
\end{example}
Note:
\begin{itemize}
\item {If $A$ is $n\times n$ and invertible, then $A\vv{x}=\vv{0}$ has only the trivial solution of $\vv{x}=\vv{0}$ or in other words the columns of $A$ are linearly independent.}
\end{itemize}
\begin{theorem}{Theorem: Properties Of Invertible Matrices}
Let $A$ and $B$ be invertible $n\times n$ matrices. Let $c$ be a nonzero scalar. Let $k$ be a nonnegative integer. Then:
\begin{enumerate}
\item $\(A^{-1}\)^{-1}=A$
\item $\(AB\)^{-1}=B^{-1}A^{-1}$
\item $\(A^T\)^{-1}=\(A^{-1}\)^T$
\item $\displaystyle{\(cA\)^{-1}=\frac{1}{c}A^{-1}}$
\item $\(A^k\)^{-1}=\(A^{-1}\)^k=A^{-k}$
\end{enumerate}
Remember:
$$AA^{-1}=A^{-1}A=I_n$$
\end{theorem}
\begin{proof} \newline
\begin{itemize}
\item $\(A^{-1}\)^{-1}=A$ \\
To show that $A^{-1}$ is invertible, we must argue that there is a matrix $X$ such that
$$A^{-1}X=I=XA^{-1}$$
But $A$ certainly satisfies these equations in place of $X$, so $A^{-1}$ is invertible and $A$ is an inverse of $A^{-1}$. Since inverses are unique, this means that $\(A^{-1}\)^{-1}=A$.
\item $\(AB\)^{-1}=B^{-1}A^{-1}$\\
Here we must show that there is a matrix $X$ such that
$$\(AB\)X=I=X\(AB\)$$
The claim is that substituting $B^{-1}A^{-1}$ for $X$ works. We check that
$$\(AB\)\(B^{-1}A^{-1}\)=A\(BB^{-1}\)A^{-1}=AIA^{-1}=AA^{-1}=I$$
where we have used associativity to shift the parentheses. Similarly, $\(B^{-1}A^{-1}\)\(AB\)=I$, so $AB$ is invertible and its inverse is $B^{-1}A^{-1}$.
\item $\(A^n\)^{-1}=\(A^{-1}\)^n$ \\
The basis step is when $n=0$, in which case we are being asked to prove that $A^0$ is invertible and that
$$\(A^0\)^{-1}=\(A^{-1}\)^0$$
This is the same as showing that $I$ is invertible and that $I^{-1}=I$, which is clearly true. \\ \newline
Now we assume that the result is true when $n=k$, where $k$ is a specific nonnegative integer. That is, the induction hypothesis is to assume that $A^k$ is invertible and that
$$\(A^k\)^{-1}=\(A^{-1}\)^k$$
The induction step requires that we prove that $A^{k+1}$ is invertible and that $\(A^{k+1}\)^{-1}=\(A^{-1}\)^{k+1}$. Now we know that $A^{k+1}=A^kA$ (from the proof directly above) is invertible, since $A$ and (by hypothesis) $A^k$ are both invertible. Moreover,
\begin{align*}
\(A^{-1}\)^{k+1}&=\(A^{-1}\)^kA^{-1} \\
&=\(A^k\)^{-1}A^{-1}\text{ (by the Induction Hypothesis)} \\
&=\(AA^k\)^{-1} \text{ (by the proof directly above)} \\
&=\(A^{k+1}\)^{-1}
\end{align*}
Therefore, $A^n$ is invertible for all nonnegative integers $n$, and $\(A^n\)^{-1}=\(A^{-1}\)^n$ by the principle of mathematical induction.
\end{itemize}
\end{proof}
\begin{theorem}{Theorem: A Matrix Is Invertible If It Is Row Equivalent To $I_n$}
An $n\times n$ matrix $A$ is invertible if and only if $\underbrace{A \text{ is row equivalent to } I_n}_{I_n \text{ is the RREF of } A}$.
\end{theorem}
\begin{example}{Example: How Can We Find $A^{-1}$ (if $A^{-1}$ exists)?}
Consider a $3\times 3$ matrix $A$. Suppose $A^{-1}$ exists. Then:
\begin{align*}
AA^{-1}&=I_n \\
A\begin{bmatrix}\vv{x_1} & \vv{x_2} & \vv{x_3}\end{bmatrix} &= \begin{bmatrix}1&0&0\\0&1&0\\0&0&1\end{bmatrix} \\
\begin{bmatrix}A\vv{x_1} & A\vv{x_2} & A\vv{x_3}\end{bmatrix} &=\begin{bmatrix}1&0&0\\0&1&0\\0&0&1\end{bmatrix}
\end{align*}
This results in 3 linear systems:
$$A\vv{x_1}=\begin{bmatrix}1\\0\\0\end{bmatrix}\hspace{1cm}A\vv{x_2}=\begin{bmatrix}0\\1\\0\end{bmatrix}\hspace{1cm}A\vv{x_3}=\begin{bmatrix}0\\0\\1\end{bmatrix}$$
We can solve these linear systems to find the columns of $A^{-1}$.
\end{example}
\begin{example}{Example: Finding $A^{-1}$}
Let $A=\begin{bmatrix}-2&5\\-1&2\end{bmatrix}$. Solve $A\vv{x_1}=\begin{bmatrix}1\\0\end{bmatrix}$ and $A\vv{x_2}=\begin{bmatrix}0\\1\end{bmatrix}$.
\begin{align*}
\begin{bmatrix}-2&5&\aug&1\\-1&2&\aug&0\end{bmatrix}\xrightarrow{R_1\leftrightarrow R_2}&\begin{bmatrix}-1&2&\aug&0\\-2&5&\aug&1\end{bmatrix} & \begin{bmatrix}-2&5&\aug&0\\-1&2&\aug&1\end{bmatrix}\xrightarrow{R_1\leftrightarrow R_2}&\begin{bmatrix}-1&2&\aug&1\\-2&5&\aug&0\end{bmatrix} \\
\xrightarrow{-R_1}&\begin{bmatrix*}[r]1&-2&\aug&0\\-2&5&\aug&1\end{bmatrix*} & \xrightarrow{-R_1}&\begin{bmatrix*}[r]1&-2&\aug&-1\\-2&5&\aug&0\end{bmatrix*} \\
\xrightarrow{R_2+2R_1}&\begin{bmatrix*}[r]1&-2&\aug&0\\0&1&\aug&1\end{bmatrix*} & \xrightarrow{R_2+2R_1}&\begin{bmatrix*}[r]1&-2&\aug&-1\\0&1&\aug&-2\end{bmatrix*} \\
\xrightarrow{R_1+2R_2}&\begin{bmatrix*}[r]1&0&\aug&2\\0&1&\aug&1\end{bmatrix*} & \xrightarrow{R_1+2R_2}&\begin{bmatrix*}[r]1&0&\aug&-5\\0&1&\aug&-2\end{bmatrix*}
\end{align*}
Notice that both of the row reductions are the same steps, so instead we can solve both systems at once.
$$\begin{bmatrix}A&\!\!\!\left|\begin{matrix}\phantom{}\\\phantom{}\end{matrix}\right.\!\!\!&\begin{matrix}1&0\\0&1\end{matrix}\end{bmatrix}\xrightarrow{Row\hspace{1ex}Reduce}\begin{bmatrix*}[r]1&0&\aug&2&-5\\0&1&\aug&1&-2\end{bmatrix*}$$
Verify $A^{-1}=\begin{bmatrix}2&-5\\1&-2\end{bmatrix}$: $AA^{-1}=\begin{bmatrix}-2&5\\-1&2\end{bmatrix}\begin{bmatrix}2&-5\\1&-2\end{bmatrix}=\begin{bmatrix}1&0\\0&1\end{bmatrix}=I_2\checkmark$
\end{example}
For an $n\times n$ matrix, to find $A^{-1}$ or determine $A$ is not invertible:
\begin{enumerate}
\item Augment $A$ with $I_n$ $\begin{bmatrix}A&\aug&I_n\end{bmatrix}$
\item {Row Reduce $\begin{bmatrix}A&\aug&I_n\end{bmatrix}$ until either:
\begin{enumerate}
\item We determine $A$ is not row equivalent to $I_n$ thus $A$ is not invertible.
\item We get RREF of $\begin{bmatrix}A&\aug&I_n\end{bmatrix}$ and have $\begin{bmatrix}I_n&\aug&A^{-1}\end{bmatrix}$.
\end{enumerate}}
\end{enumerate}
\begin{example}{Example: Finding $A^{-1}$ At Once}
Find $A^{-1}$ or determine $A$ is not invertible: \\ \newline
\begin{tblr}{width = \linewidth, cells = {halign = c, valign = t}, colspec = {X|X}}
{$A=\begin{bmatrix}1&0&2\\1&1&2\\0&1&1\end{bmatrix}$ \\ \vspace{2.5ex}
$\begin{aligned}
\begin{bmatrix}A&\aug&I_3\end{bmatrix}=&\begin{bmatrix}1&0&2&\aug&1&0&0\\1&1&2&\aug&0&1&0\\0&1&1&\aug&0&0&1\end{bmatrix} \\
\xrightarrow{R_2-R_1}&\begin{bmatrix*}[r]1&0&2&\aug&1&0&0\\0&1&0&\aug&-1&1&0\\0&1&1&\aug&0&0&1\end{bmatrix*} \\
\xrightarrow{R_3-R_2}&\begin{bmatrix*}[r]1&0&2&\aug&1&0&0\\0&1&0&\aug&-1&1&0\\0&0&1&\aug&1&-1&1\end{bmatrix*} \\
\xrightarrow{R_1-2R_3}&\begin{bmatrix*}[r]1&0&0&\aug&-1&2&-2\\0&1&0&\aug&-1&1&0\\0&0&1&\aug&1&-1&1\end{bmatrix*}
\end{aligned}$ \\ \vspace{2.5ex}
$A=\begin{bmatrix}1&0&2\\1&1&2\\0&1&1\end{bmatrix} \hspace{1cm} A^{-1}=\begin{bmatrix*}[r]-1&2&-2\\-1&1&0\\1&-1&1\end{bmatrix*}$
\parbox{\linewidth}{\begin{flushleft}
Verify: $A^{-1}A=\begin{bmatrix}1&0&0\\0&1&0\\0&0&1\end{bmatrix}=I_3\checkmark$
\end{flushleft}}} & {$A=\begin{bmatrix}1&0&3&4\\0&1&1&1\\0&0&1&0\\0&1&2&1\end{bmatrix}$ \\ \vspace{2.5ex}
$\begin{aligned}
\begin{bmatrix}A&\aug&I_4\end{bmatrix}=&\begin{bNiceArray}{rrrr | rrrr}
1&0&3&4&1&0&0&0\\
0&1&1&1&0&1&0&0\\
0&0&1&0&0&0&1&0\\
0&\underline{1}&2&1&0&0&0&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceArray} \\
\xrightarrow{R_4-R_2}&\begin{bNiceArray}{rrrr | rrrr}
1&0&3&4&1&0&0&0\\
0&1&1&1&0&1&0&0\\
0&0&1&0&0&0&1&0\\
0&0&1&0&0&-1&0&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-3) circle (2mm) ;
\end{bNiceArray} \\
\xrightarrow{R_4-R_3}&\begin{bNiceArray}{rrrr | rrrr}
1&0&3&4&1&0&0&0\\
0&1&1&1&0&1&0&0\\
0&0&1&0&0&0&1&0\\
0&0&0&\underline{0}&0&-1&-1&1
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-3) circle (2mm) ;
\tikz \draw (4-6) circle (3mm) ;
\end{bNiceArray}
\end{aligned}$
\parbox{\linewidth}{\begin{flushleft}
$A$ is not row equivalent to $I_4$ so $A^{-1}$ DNE.
\end{flushleft}}}
\end{tblr}
\end{example}
\begin{theorem}{Theorem: The Fundamental Theorem Of Invertible Matrices (FTIM)}
Let $A$ be an $n\times n$ matrix. The following statements are equivalent (either all are true or all are false):
\begin{enumerate}
\item $A$ is invertible, ($A^{-1}$ exists)
\item $A$ is row equivalent to $I_n$. ($I_n$ is the RREF of $A$)
\item rank$A=n$
\item $A\vv{x}=\vv{b}$ has a solution for every $\vv{b}\in\mathbb{R}^n$. (more specifically, there is one unique solution $\vv{x}=A^{-1}\vv{b}$.)
\item $A\vv{x}=\vv{0}$ has only the trivial solution $\vv{x}=\vv{0}$.
\item The columns of $A$ span $\mathbb{R}^n$
\item The columns of $A$ are linearly independent.
\end{enumerate}
\end{theorem}
Notes:
\begin{itemize}
\item FTIM only applies to $n\times n$ matrices.
\item The negation of the FTIM gives a list of equivalent statements about noninvertible matrices.
\end{itemize}
\begin{example}{Example: FTIM Negation}
Let $A$ and $B$ be $n\times n$ matrices. If $B$ is not invertible, is $AB$ invertible?
\begin{align*}
B \text{ is not invertible }\Leftrightarrow&\text{ By FTIM, } B\vv{x}=\vv{0} \text{ has infinitely many solutions} \\
\Leftrightarrow& AB\vv{x}=A\vv{0} \text{ has infinitely many solutions} \\
\Leftrightarrow& \(AB\)\vv{x}=\vv{0}\text{ has infinitely many solutions} \\
\Leftrightarrow& \text{ By FTIM, $AB$ is not invertible.}
\end{align*}
\end{example}
\newpage
\section{Section 3.5 Subspaces}
\begin{definition}{Definition: Subspace}
A set of vectors $W $ in $\mathbb{R}^n$ is a \textbf{subspace} of $\mathbb{R}^n$ if:
\begin{enumerate}
\item $\vv{0}$ is in $W$
\item $W$ is closed under addition: For vectors $\vv{u}$ and $\vv{v}$ that are in $W$, then $\vv{u}+\vv{v}$ is also in $W$.
\item $W$ is also closed under scalar multiplication: For all $\vv{v}$ in $W$ and $c\in\mathbb{R}$, then $c\vv{v}$ is in $W$.
\end{enumerate}
\begin{example}{Example: Subspaces}
\begin{enumerate}
\item $\mathbb{R}^n$ is a subspace of $\mathbb{R}^n$.
\item $\left\{\vv{0}\right\}$ is a subspace of $\mathbb{R}^n$ (called the \textbf{zero space} of $\mathbb{R}^n$)
\end{enumerate}
\end{example}
\end{definition}
\begin{example}{Example: Not A Subspace Through Counter Example}
Is the set $W=\left\{\begin{bmatrix}0\\0\\0\end{bmatrix},\begin{bmatrix}1\\1\\1\end{bmatrix},\begin{bmatrix}1\\0\\1\end{bmatrix}\right\}$ a subspace of $\mathbb{R}^3$? \\ \newline
$W$ is not closed under addition because $\begin{bsmallmatrix}1\\1\\1\end{bsmallmatrix}$ and $\begin{bsmallmatrix}1\\0\\1\end{bsmallmatrix}$ are in $W$ but $\begin{bsmallmatrix}1\\1\\1\end{bsmallmatrix}+\begin{bsmallmatrix}1\\0\\1\end{bsmallmatrix}=\begin{bsmallmatrix}2\\1\\2\end{bsmallmatrix}$ is not in $W$. \\ \newline
$W$ is also not closed under scalar multiplication as $\begin{bsmallmatrix}1\\1\\1\end{bsmallmatrix}$ is in $W$ but $2\begin{bsmallmatrix}1\\1\\1\end{bsmallmatrix}=\begin{bsmallmatrix}2\\2\\2\end{bsmallmatrix}$ is not in $W$. \\ \newline
Thus $W$ is not a subspace of $\mathbb{R}^3$.
\end{example}
Notes:
\begin{itemize}
\item $\left\{\vv{0}\right\}$ is the only finite subspace of $\mathbb{R}^n$. All other subspaces of $\mathbb{R}^n$ contain infinitely many vectors.
\end{itemize}
\begin{example}{Example: Determine If A Set Is A Subspace}
Sketch each set in $\mathbb{R}^2$. Determine if the set is a subspace of $\mathbb{R}^2$.
\begin{enumerate}
\item {$S_1=\left\{\begin{bmatrix}x\\y\end{bmatrix}\in\mathbb{R}^2\Big|x+y=1\right\}$ \\
\begin{center}
\begin{tikzpicture}
\begin{axis} [
width = 0.5\linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2, ymax = 2,
xmin = -2, xmax = 2,
xtick = {1},
ytick = {1},
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\addplot[mark=*, ultra thick] coordinates {(0,1)};
\addplot[mark=*, ultra thick] coordinates {(1,0)};
\addplot [
black,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -1:2,
]
{1-x};
\end{axis}
\end{tikzpicture}
\end{center}
$\vv{0}$ is not in $S_1$ $(0+0\ne1)$ so $S_1$ is not a subspace.}
\item{$S_2=\left\{\begin{bmatrix}x\\y\end{bmatrix}\in\mathbb{R}^2\Big|x^2-y^2=0\right\}$ \\
\begin{center}
\begin{tikzpicture}
\begin{axis} [
width = 0.5\linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\addplot[mark=*, ultra thick] coordinates {(0,0)};
\addplot [
black,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{x} node [right] {$y=x$};
\addplot [
black,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{-x} node[left, pos = 0] {$y=-x$};
\draw[red, ultra thick, -Stealth] (0,0) -- (1,1);
\draw[red, ultra thick, -Stealth] (0,0) -- (-1,1);
\draw[blue, ultra thick, -Stealth] (0,0) -- (0,2);
\end{axis}
\end{tikzpicture}
\end{center}
$\vv{0}$ is in $S_2$ because $0^2-0^2=0$ \\ \newline $\begin{bsmallmatrix}1\\1\end{bsmallmatrix}$ and $\begin{bsmallmatrix}-1\\1\end{bsmallmatrix}$ are in $S_2$ but $\begin{bsmallmatrix}1\\1\end{bsmallmatrix}+\begin{bsmallmatrix}-1\\1\end{bsmallmatrix}=\begin{bsmallmatrix}0\\2\end{bsmallmatrix}$ is not in $S_2$ $\(0^2-2^2\ne0\)$ so $S_2$ is \textbf{not} closed under addition, thus $S_2$ is \textbf{not} a subspace of $\mathbb{R}^2$.}
\item{$S_3=\left\{\begin{bmatrix}x\\y\end{bmatrix}\in\mathbb{R}^2\Big|x\ge0\text{ and }y\ge0\right\}$ \\
\begin{center}
\begin{tikzpicture}
\begin{axis} [
width = 0.5\linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\fill[red, fill=red!30, draw = red] (0,0) rectangle (2,2);
\draw[thick, -Stealth] (0,0) -- (1,1);
\draw[thick, -Stealth] (0,0) -- (-2,-2);
\end{axis}
\end{tikzpicture}
\end{center}
$\begin{bsmallmatrix}1\\1\end{bsmallmatrix}$ is in $S_3$ $\(1\ge0\)$, and $-2\in\mathbb{R}$, however $-2\begin{bsmallmatrix}1\\1\end{bsmallmatrix}=\begin{bsmallmatrix}-2\\-2\end{bsmallmatrix}$ is \textbf{not} in $S_3$. Therefore $S_3$ is not closed under scalar multiplication so $S_3$ is \textbf{not} a subspace.}
\item{$S_4=\left\{\begin{bmatrix}x\\y\end{bmatrix}\in\mathbb{R}^2\Big|x-y=0\right\}$ \\
\begin{center}
\begin{tikzpicture}
\begin{axis} [
width = 0.5\linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\addplot[mark=*, ultra thick] coordinates {(0,0)};
\addplot [
black,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{x};
\end{axis}
\end{tikzpicture}
\end{center}
\begin{enumerate}
\item {$\begin{bmatrix}0\\0\end{bmatrix}$ is in $S_4$ because $0-0=0$.}
\item {Is $S_4$ closed under addition? \\ \newline
Let $\begin{bmatrix}x_1\\y_1\end{bmatrix},\begin{bmatrix}x_2\\y_2\end{bmatrix}\in S_4$. Then $x_1-y_1=0$ and $x_2-y_2=0$. Is $\begin{bmatrix}x_1\\y_1\end{bmatrix}+\begin{bmatrix}x_2\\y_2\end{bmatrix}=\begin{bmatrix}x_1+x_2\\y_1+y_2\end{bmatrix}$ in $S_4$?
\begin{align*}
\(x_1+x_2\)-\(y_1+y_2\)&=x_1+x_2-y_1-y_2 \\
&=x_1-y_1+x_2-y_2 \\
&=0 + 0 \\
&= 0
\end{align*}
So $\begin{bmatrix}x_1+x_2\\y_1+y_2\end{bmatrix}$ is in $S_4$ so $S_4$ is closed under addition.}
\item {Is $S_4$ closed under scalar multiplication. Let $\begin{bmatrix}x\\y\end{bmatrix}\in S_4$ and $c\in\mathbb{R}$. Then $x-y=0$. Is $c\begin{bmatrix}x\\y\end{bmatrix}=\begin{bmatrix}cx\\cy\end{bmatrix}$ in $S_4$?
\begin{align*}
cx-cy&=c\(x-y\) \\
&=c\(0\) \\
&=0
\end{align*}
$c\begin{bmatrix}x\\y\end{bmatrix}$ is in $S_4$ so $S_4$ is closed under scalar multiplication.}
\end{enumerate}
Therefore $S_4$ is a subspace of $\mathbb{R}^2$ as $\vv{0}$ is in $S_4$, $S_4$ is closed under addition and also closed under scalar multiplication.}
\end{enumerate}
\end{example}
\begin{example}{Example: Is \text{$A\vv{x}=\vv{0}$} A Subspace?}
Let $A$ be an $m\times n$ matrix. Let $G=\left\{\vv{x}\in\mathbb{R}^n\Big|A\vv{x}=\vv{0}\right\}$. Is $G$ a subspace of $\mathbb{R}^n$?
\begin{enumerate}
\item {$\vv{0}$ is in $G$ because, $A\vv{0}=\vv{0}\checkmark$.}
\item {Is $G$ closed under addition? Let $\vv{x},\vv{y}\in G$ then $A\vv{x}=\vv{0}$ and $A\vv{y}=\vv{0}$. Is $\vv{x}+\vv{y}$ in $G$?
\begin{align*}
A\(\vv{x}+\vv{y}\)&=A\vv{x}+A\vv{y} \\
&=\vv{0}+\vv{0}\\
&=\vv{0}\checkmark
\end{align*}
$\vv{x}+\vv{y}$ is in $G$ so $G$ is closed under addition.}
\item {Is $G$ closed under scalar multiplication? Let $\vv{x}\in G$ and $c\in\mathbb{R}$ then $A\vv{x}=\vv{0}$. Is $c\vv{x}$ in $G$?
\begin{align*}
A\(c\vv{x}\)&=cA\vv{x} \\
&=c\(\vv{0}\) \\
&=\vv{0}\checkmark
\end{align*}
$c\vv{x}$ is in $G$ so $G$ is closed under scalar multiplication.}
\end{enumerate}
Therefore $G$ is a subspace of $\mathbb{R}^n$.
\end{example}
Notes:
\begin{itemize}
\item {In the previous example $G$ is the solution set to the homogeneous linear system $A\vv{x}=\vv{0}$}
\item {The solution set to $A\vv{x}=\vv{0}$ is a subspace called the \textbf{null space} of $A$.}
\end{itemize}
\begin{example}{Example: Is span\text{$\(\vv{u},\vv{v}\)$} A Subspace Of $\mathbb{R}^6$?}
Let $\vv{u}$ and $\vv{v}$ be vectors in $\mathbb{R}^6$. Is span$\(\vv{u},\vv{v}\)$ a subspace of $\mathbb{R}^6$?
$$\text{span}\(\vv{u},\vv{v}\)=\left\{c\vv{u}+d\vv{v}\Big|c,d\in\mathbb{R}\right\}$$
\begin{enumerate}
\item {Is $\vv{0}$ in span$\(\vv{u},\vv{v}\)$? \begin{center}$\vv{0}=0\vv{u}+0\vv{v}$ so $\vv{0}$ is in span$\(\vv{u},\vv{v}\)\checkmark$.\end{center}}
\item {Is span$\(\vv{u},\vv{v}\)$ closed under addition? \\ \newline
Let $\vv{x},\vv{y}\in\text{span}\(\vv{u},\vv{v}\)$. Then:
$$\vv{x}=c_1\vv{u}+d_1\vv{v}\quad\text{ and }\quad\vv{y}=c_2\vv{u}+d_2\vv{v}$$
Is $\vv{x}+\vv{y}$ in span$\(\vv{u},\vv{v}\)$?
\begin{align*}
\vv{x}+\vv{y}&=c_1\vv{u}+d_1\vv{v}+c_2\vv{u}+d_2\vv{v} \\
&=c_1\vv{u}+c_2\vv{u}+d_1\vv{v}+d_2\vv{v} \\
&=\(c_1+c_2\)\vv{u}+\(d_1+d_2\)\vv{v}\checkmark \\
\end{align*}
$\vv{x}+\vv{y}$ is in span$\(\vv{u},\vv{v}\)$ so span$\(\vv{u},\vv{v}\)$ is closed under addition.}
\item {Is span$\(\vv{u},\vv{v}\)$ closed under scalar multiplication? \\ \newline
Let $\vv{x}\in\text{span}\(\vv{u},\vv{v}\)$ and $k\in\mathbb{R}$. Then:
$$\vv{x}=c\vv{u}+d\vv{v}$$
Is $k\vv{x}$ in span$\(\vv{u},\vv{v}\)$?
$$k\vv{x}=k\(c\vv{u}+d\vv{v}\)=\(kc\)\vv{u}+\(kd\)\vv{v}\checkmark$$
Thus $k\vv{x}$ is in span$\(\vv{u},\vv{v}\)$ so span$\(\vv{u},\vv{v}\)$ is closed under scalar multiplication.}
\end{enumerate}
Therefore, span$\(\vv{u},\vv{v}\)$ is a subspace of $\mathbb{R}^6$.
\end{example}
\begin{theorem}{Theorem: A span Is A Subspace}
Let $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ be vectors in $\mathbb{R}^n$. Then the set span$\(\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\)$ is a subspace of $\mathbb{R}^n$.
\end{theorem}
\begin{example}{Example: Spans Are A Subspace}
\begin{itemize}
\item span$\(\begin{bmatrix}1\\3\\-1\\2\\0\end{bmatrix},\begin{bmatrix}2\\-1\\1\\0\\1\end{bmatrix}\)$ is a subspace of $\mathbb{R}^5$.
\item span$\(\begin{bmatrix}1\\2\end{bmatrix},\begin{bmatrix}3\\1\end{bmatrix},\begin{bmatrix}4\\6\end{bmatrix}\)$ is a subspace of $\mathbb{R}^2$.
\end{itemize}
\end{example}
\begin{definition}{Definition: Spanning Set}
Let $W$ be a subspace of $\mathbb{R}^n$. We say the vectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ in $W$ span/generate $W$ if $$\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\)=W.$$ We call $\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\}$ a \textbf{spanning set} for $W$.
\begin{example}{Example: Spanning Set}
\begin{tblr}{width = \linewidth, colspec = {XX}}
{$\begin{aligned}
S_4&=\left\{\begin{bmatrix}x\\y\end{bmatrix}\in\mathbb{R}^2\Big|x-y=0\right\} \\
S_4&=\text{span}\(\begin{bmatrix}1\\1\end{bmatrix}\)
\end{aligned}$} &
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\addplot[mark=*, ultra thick] coordinates {(0,0)};
\addplot [
black,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{x};
\draw[ultra thick, -Stealth, red] (0,0) -- (1,1);
\end{axis}
\end{tikzpicture}}}
\end{tblr}
$\left\{\begin{bmatrix}1\\1\end{bmatrix}\right\}$ is a spanning set for $S_4$, as $\begin{bmatrix}1\\1\end{bmatrix}$ spans $S_4$. With that said $$\text{span}\(\begin{bmatrix}0\\0\end{bmatrix}\)=\left\{\begin{bmatrix}0\\0\end{bmatrix}\right\}\ne S_4$$ and $\left\{\begin{bmatrix}1\\1\end{bmatrix},\begin{bmatrix}2\\2\end{bmatrix}\right\}$ is also a spanning set for $S_4$.
\end{example}
$\star$ Note: Spanning sets are not unique. The number of vectors in a spanning set is not unique.
\end{definition}
\begin{example}{Example: Finding A Spanning Set Of A Matrix}
$A=\begin{bNiceMatrix}[first-row]
\downarrow&&\downarrow\\
1&0&2&0\\
2&0&1&3\\
3&0&-1&7
\end{bNiceMatrix}\xrightarrow{Row\hspace{1ex}Reduce}\begin{bNiceMatrix}[first-row]
\downarrow&&\downarrow\\
1&0&0&2\\
0&0&1&-1\\
0&0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-3) circle (2mm) ;
\end{bNiceMatrix}=U$
\begin{align*}
W&=\text{span}\(\begin{bmatrix}1\\2\\3\end{bmatrix},\begin{bmatrix}0\\0\\0\end{bmatrix},\begin{bmatrix}2\\1\\-1\end{bmatrix},\begin{bmatrix}0\\3\\7\end{bmatrix}\) \\
&=\text{span}\(\begin{bmatrix}1\\2\\3\end{bmatrix},\begin{bmatrix}2\\1\\-1\end{bmatrix}\)
\end{align*}
$\underbrace{\left\{\begin{bmatrix}1\\2\\3\end{bmatrix},\begin{bmatrix}0\\0\\0\end{bmatrix},\begin{bmatrix}2\\1\\-1\end{bmatrix},\begin{bmatrix}0\\3\\7\end{bmatrix}\right\}}_{\text{Linearly Dependent}}$ is a spanning set for $W$. \\ \newline
$\underbrace{\left\{\begin{bmatrix}1\\2\\3\end{bmatrix},\begin{bmatrix}2\\1\\-1\end{bmatrix}\right\}}_{\text{Linearly Independent}}$ is also a spanning set for $W$. \\
$\star$ Note: span$\(\begin{bmatrix}1\\2\\3\end{bmatrix},\begin{bmatrix}2\\1\\-1\end{bmatrix}\)\ne\text{span}\(\begin{bmatrix}1\\0\\0\end{bmatrix},\begin{bmatrix}0\\1\\0\end{bmatrix}\)$
\end{example}
To get a spanning set for the subspace $W$ that contains the fewest number of vectors, we want to find the \textbf{linearly independent spanning set (basis)} for $W$.
\begin{definition}{Definition: Basis}
Let $W$ be a subspace of $\mathbb{R}^n$ and assume $W\ne\left\{\vv{0}\right\}$. The ordered set of vectors \\ $\mathcal{B}=\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\right\}$ in $W$ is called a \textbf{basis} for $W$ if:
\begin{enumerate}
\item $W=\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\)$
\item and $\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\right\}$ is linearly independent
\end{enumerate}
\textbf{Special Case:} The empty set denoted $\emptyset$, is a basis for $\left\{\vv{0}\right\}$.
\begin{example}{Example: Different Basis Of $\mathbb{R}^2$}
Find 3 distinct bases for $\mathbb{R}^2$.
$$\left\{\begin{bmatrix}1\\1\end{bmatrix},\begin{bmatrix}1\\-1\end{bmatrix}\right\}\quad\left\{\begin{bmatrix}0\\1\end{bmatrix},\begin{bmatrix}1\\0\end{bmatrix}\right\}\quad\left\{\begin{bmatrix}1\\0\end{bmatrix},\begin{bmatrix}0\\1\end{bmatrix}\right\}$$
$\star$ Note: $\left\{\begin{bmatrix}0\\1\end{bmatrix},\begin{bmatrix}1\\0\end{bmatrix}\right\}$ and $\left\{\begin{bmatrix}1\\0\end{bmatrix},\begin{bmatrix}0\\1\end{bmatrix}\right\}$ are two different bases.
\end{example}
\end{definition}
\begin{definition}{Definition: Standard Basis}
The \textbf{standard basis} for $\mathbb{R}^n$ is the ordered set $\mathcal{B}=\left\{\vv{e_1},\vv{e_2},\hdots,\vv{e_n}\right\}$ where $\vv{e_i}$ is the $i^{\text{th}}$ column vector of $I_n$.
\begin{example}{Example: Standard Basis}
\begin{itemize}
\item {$\left\{\begin{bmatrix}1\\0\\0\end{bmatrix},\begin{bmatrix}0\\1\\0\end{bmatrix},\begin{bmatrix}0\\0\\1\end{bmatrix}\right\}$ is the standard basis for $\mathbb{R}^3$.}
\item {$\left\{\begin{bmatrix}1\\0\\0\\0\end{bmatrix},\begin{bmatrix}0\\1\\0\\0\end{bmatrix},\begin{bmatrix}0\\0\\1\\0\end{bmatrix},\begin{bmatrix}0\\0\\0\\1\end{bmatrix}\right\}$ is the standard basis for $\mathbb{R}^4$.}
\end{itemize}
\end{example}
\end{definition}
$\star$ Note: When $W$ is a nonzero subspace of $\mathbb{R}^n$, there are infinitely many bases for $W$.
\begin{example}{Example: How Many Ways Are There To Write A Vector In A Subspace?}
Consider the subspace $W$ of $\mathbb{R}^3$ where $\underbrace{\mathcal{B}=\left\{\vv{v_1},\vv{v_2}\right\}}_{W=\text{span}\(\vv{v_1},\vv{v_2}\)}$ is a basis for $W$. If $\vv{b}$ is a vector in $W$, how many solutions exist to the linear system $x_1\vv{v_1}+x_2\vv{v_2}=\vv{b}$?
\begin{itemize}
\item {A solution exists because $\vv{v_1}$ and $\vv{v_2}$ span $W$ (every vector in $W$ is a linear combination of $\vv{v_1}$ and $\vv{v_2}$).}
\item {One unique solution exists because $\vv{v_1}$ and $\vv{v_2}$ are linearly independent (there are no free variables in the linear system).}
\end{itemize}
\end{example}
\begin{theorem}{Theorem: There Is One Way To Write A Vector In A Subspace}
Let $W$ be a subspace of $\mathbb{R}^n$ where $W\ne\left\{\vv{0}\right\}$. Let $\mathcal{B}=\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\right\}$ be a basis for $W$. Let $\vv{v}$ be a vector in $W$. Then there is exactly one way to write $\vv{v}$ as a linear combination of $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$.
$$c_1\vv{v_1}+c_2\vv{v_2}+\hdots+c_k\vv{v_k}=\vv{v}$$
$c_1,c_2,\hdots,c_k$ are the \textbf{coordinates of $\vv{v}$ with respect to $\mathcal{B}$.} \\
$\begin{bmatrix}\vv{v}\end{bmatrix}\mathcal{B}=\begin{bmatrix}c_1\\c_2\\\vdots\\c_k\end{bmatrix}$ is the \textbf{coordinate vector of $\vv{v}$ with respect to $\mathcal{B}$}.
\end{theorem}
\begin{example}{Example: Finding The Coordinate Vector Graphically}
In $\mathbb{R}^2$: $\mathcal{B}=\left\{\vv{u},\vv{v}\right\}$ is a basis for $\mathbb{R}^2$.
\begin{center}
\begin{tikzpicture}[scale = 0.5]
\fill (0,0) circle[radius=7pt];
\draw[ultra thick, Stealth-Stealth, dotted] (-3,-6) -- (3,6);
\draw[ultra thick, -Stealth] (0,0) -- (1,2) node[right] {$\vv{u}$};
\draw[ultra thick, Stealth-Stealth, dotted] (-3,3) -- (3,-3);
\draw[ultra thick, -Stealth] (0,0) -- (2,-2) node[right] {$\vv{v}$};
\draw[dotted, ultra thick] (-1,-5) -- (-2,-4);
\draw[dotted, ultra thick] (-1,-5) -- (1,-1);
\draw[ultra thick, -Stealth] (0,0) -- (-1,-5) node[below right] {$\vv{w}=-2\vv{u}+\frac{1}{2}\vv{v}$};
\end{tikzpicture}
\end{center}
$\begin{bmatrix}\vv{w}\end{bmatrix}\mathcal{B}=\begin{bmatrix}-2\\1/2\end{bmatrix}$ and for $\mathcal{B}_2=\left\{\vv{v},\vv{u}\right\}$ then $\begin{bmatrix}\vv{w}\end{bmatrix}\mathcal{B}_2=\begin{bmatrix}1/2\\-2\end{bmatrix}$.
\end{example}
\begin{example}{Example: Finding The Coordinate Vector}
The set $\mathcal{B}=\left\{\begin{bmatrix}1\\0\\1\end{bmatrix},\begin{bmatrix}0\\-1\\1\end{bmatrix},\begin{bmatrix}1\\1\\1\end{bmatrix}\right\}$ is a basis for $\mathbb{R}^3$.
\begin{enumerate}
\item {Let $\vv{v}=\begin{bmatrix}10\\11\\6\end{bmatrix}$. Find $\begin{bmatrix}\vv{v}\end{bmatrix}\mathcal{B}$. \\ \newline
Firstly Solve:
$$x_1\begin{bmatrix}1\\0\\1\end{bmatrix}+x_2\begin{bmatrix}0\\-1\\1\end{bmatrix}+x_3\begin{bmatrix}1\\1\\1\end{bmatrix}=\begin{bmatrix}10\\11\\6\end{bmatrix}$$
Next row reduce to RREF:
$$\begin{bmatrix}1&0&1&\aug&10\\0&-1&1&\aug&11\\1&1&1&\aug&6\end{bmatrix}\xrightarrow{\text{Row Reduce}}\begin{bmatrix}1&0&0&\aug&3\\0&1&0&\aug&-4\\0&0&1&\aug&7\end{bmatrix}$$
Therefore $\[\vv{v}\]\mathcal{B}=\begin{bmatrix}3\\-4\\7\end{bmatrix}$.}
\item {Let $\[\vv{u}\]\mathcal{B}=\begin{bmatrix}2\\-1\\1\end{bmatrix}$. Find $\vv{u}$.
$$\vv{u}=2\begin{bmatrix}1\\0\\1\end{bmatrix}-1\begin{bmatrix}0\\-1\\1\end{bmatrix}+1\begin{bmatrix}1\\1\\1\end{bmatrix}=\begin{bmatrix}3\\2\\2\end{bmatrix}$$}
\end{enumerate}
\end{example}
$\star$ Note: Every subspace of $\mathbb{R}^n$ has a basis and bases for nonzero subspaces are not unique. However, every basis for a subspace $W$ contains the same number of vectors.
\begin{example}{Example: Every Basis Having The Same Number Of Vectors}
If $W$ is a subspace of $\mathbb{R}^5$ and $\mathcal{B}=\left\{\vv{u},\vv{v},\vv{w}\right\}$ is a basis for $W$, then every basis for $W$ contains exactly 3 vectors. In other words dim$W=3$.
\end{example}
\begin{definition}{Definition: Dimension}
Let $W$ be a subspace of $\mathbb{R}^n$. The number of vectors in a basis for $W$ is called the \textbf{dimension} of $W$, denoted dim$W$.
\begin{example}{Example: Dimension}
\begin{itemize}
\item dim$\left\{\vv{0}\right\}=0$
\item dim$\(\mathbb{R}^2\)=2$
\item dim$\(\mathbb{R}^n\)=n$
\item {\begin{tblr}{width = \linewidth, colspec = {XX}}
{$\begin{aligned}
W&=\text{span}\(\begin{bmatrix}1\\1\end{bmatrix}\) \\
\mathcal{B}_W&=\left\{\begin{bmatrix}1\\1\end{bmatrix}\right\}
\end{aligned}$ \\
$\text{dim}W=1$ \\
$W$ is a one-dimensional subspace of $\mathbb{R}^2$.} & {\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
xtick = {-2,-1,1,2},
ytick = {-2,-1,1,2},
clip = false,
]
\addplot[mark=*, ultra thick] coordinates {(0,0)};
\addplot [
black,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{x} node[right] {$W$};
\draw[ultra thick, -Stealth, red] (0,0) -- (1,1);
\end{axis}
\end{tikzpicture}}}
\end{tblr}}
\end{itemize}
\end{example}
\end{definition}
\begin{example}{Example: Finding The Basis Of A Matrix}
$A=\begin{bNiceMatrix}[first-row]
&&\xmark&\xmark&\\
3&-1&3&2&2 \\
2&1&2&3&3 \\
1&2&1&3&3 \\
4&1&4&5&6
\end{bNiceMatrix}\xrightarrow{Row\hspace{1ex}Reduce}\begin{bNiceMatrix}[first-row]
&&\downarrow&\downarrow&\\
1&0&1&1&0 \\
0&1&0&1&0 \\
0&0&0&0&1 \\
0&0&0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-5) circle (2mm) ;
\end{bNiceMatrix}=U$
\begin{align*}
W&=\text{span}\(\begin{bmatrix}3\\2\\1\\4\end{bmatrix},\begin{bmatrix}-1\\1\\2\\1\end{bmatrix},\begin{bmatrix}3\\2\\1\\4\end{bmatrix},\begin{bmatrix}2\\3\\3\\5\end{bmatrix},\begin{bmatrix}2\\3\\3\\6\end{bmatrix}\) \\
&=\text{span}\(\begin{bmatrix}3\\2\\1\\4\end{bmatrix},\begin{bmatrix}-1\\1\\2\\1\end{bmatrix},\begin{bmatrix}2\\3\\3\\6\end{bmatrix}\) \\
\mathcal{B}_W&=\left\{\begin{bmatrix}3\\2\\1\\4\end{bmatrix},\begin{bmatrix}-1\\1\\2\\1\end{bmatrix},\begin{bmatrix}2\\3\\3\\6\end{bmatrix}\right\}
\end{align*}
dim$W$=3, thus $W$ is a 3-dimensional subspace of $\mathbb{R}^4$.
\end{example}
\begin{theorem}{Theorem: If Vectors Span $W$}
Let $W$ be a subspace of $\mathbb{R}^n$ and suppose $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ are in $W$ as well as span $W$. Then:
\begin{enumerate}
\item dim$W\le k$
\item {and if dim$W=k$, then $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ are linearly independent.}
\end{enumerate}
\end{theorem}
\begin{theorem}{Theorem: If Vectors In $W$ Are Linearly Independent}
Let $W$ be a subspace of $\mathbb{R}^n$ and suppose $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ are linearly independent vectors in $W$. Then:
\begin{enumerate}
\item dim$W\ge k$
\item {and if dim$W=k$, then $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ span $W$.}
\end{enumerate}
\end{theorem}
$\star$ Note: If $W$ is a subspace of $\mathbb{R}^n$, then $0\le\text{dim}W\le n$.
\begin{itemize}
\item If dim$W=0$, then $W=\left\{\vv{0}\right\}$.
\item If dim$W=n$, then $W=\mathbb{R}^n$.
\end{itemize}
\newpage
\subsection{Subspaces in $\mathbb{R}^3$ (Geometrically)}
\begin{example}{Example: Subspaces In $\mathbb{R}^3$ Geometrically}
\begin{tblr}{width = \linewidth, colspec={QQXQ}, cells = {halign = c, valign = m}, hlines, vlines}
{Set} & {Dimension} & {Plot} & {Description} \\
{$S_0=\left\{\vv{0}\right\}$} & {dim$\(S_0\)=0$} & {\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis}[
width = \linewidth,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
zmin = -2.5, zmax = 2.5,
axis line style = {ultra thick},
xlabel = {$x$},
ylabel = {$y$},
zlabel = {$z$},
clip = false,
ticks = none,
3d box=background,
grid=major,
]
\addplot3[mark=*, ultra thick] coordinates {(0,0,0)};
\end{axis}
\end{tikzpicture}}} & {Point/Origin}\\
{$S_1=\text{span}\(\vv{u}\)$ \\
$\(\vv{u}\ne0\)$} & {dim$\(S_1\)=1$} & {\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis}[
width = \linewidth,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
zmin = -2.5, zmax = 2.5,
axis line style = {ultra thick},
xlabel = {$x$},
ylabel = {$y$},
zlabel = {$z$},
clip = false,
ticks = none,
3d box=background,
grid=major,
]
\addplot3 [red,domain=-2.5:2.5,samples y=1, Stealth-Stealth, ultra thick] (0,x,x);
\addplot3[mark=*, ultra thick] coordinates {(0,0,0)};
\end{axis}
\end{tikzpicture}}} & {Line through Origin} \\
{$S_2=\text{span}\(\vv{u},\vv{v}\)$ \\
$\(\vv{u},\vv{v}\text{ are lin. indep}\)$} & {dim$\(S_2\)=2$} & {\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis}[
width = \linewidth,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
zmin = -2.5, zmax = 2.5,
axis line style = {ultra thick},
xlabel = {$x$},
ylabel = {$y$},
zlabel = {$z$},
clip = false,
ticks = none,
3d box=background,
grid=major,
]
\fill[red, fill=red, draw = red, fill opacity=0.2, ultra thick] (2.5,-2.5,-2.5) -- (2.5,2.5,-2.5) -- (-2.5,2.5,2.5) -- (-2.5,-2.5,2.5) -- cycle;
\addplot3[mark=*, ultra thick] coordinates {(0,0,0)};
\draw[black, ultra thick, -Stealth] (0,0,0) -- (0,2.5,0);
\draw[black, ultra thick, -Stealth] (0,0,0) -- (-1.25, 0,1.25);
\end{axis}
\end{tikzpicture}}} & {Plane through Origin} \\
{$S_3=\text{span}\(\vv{u},\vv{v},\vv{w}\)$ \\
$\(\vv{u},\vv{v},\vv{w}\text{ are lin. indep}\)$} & {dim$\(S_3\)=3$} & {\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis}[
width = \linewidth,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
zmin = -2.5, zmax = 2.5,
axis line style = {ultra thick},
xlabel = {$x$},
ylabel = {$y$},
zlabel = {$z$},
clip = false,
ticks = none,
3d box=background,
grid=major,
]
\addplot3[mark=*, ultra thick] coordinates {(0,0,0)};
\draw[draw =red,fill=red, fill opacity=0.2, ultra thick] (-2.5,-2.5,-2.5) -- (2.5,-2.5,-2.5) -- (2.5,-2.5,2.5) -- (-2.5,-2.5,2.5) -- cycle;
\draw[draw =red,fill=red, fill opacity=0.2, ultra thick] (2.5,-2.5,-2.5) -- (2.5,2.5,-2.5) -- (2.5,2.5,2.5) -- (2.5,-2.5,2.5) -- cycle;
\draw[draw =red,fill=red, fill opacity=0.2, ultra thick] (-2.5,-2.5,2.5) -- (-2.5,2.5,2.5) -- (2.5,2.5,2.5) -- (2.5,-2.5,2.5) -- cycle;
\end{axis}
\end{tikzpicture}}} & {$S_3=\mathbb{R}^3$}
\end{tblr}
\end{example}
\newpage
\subsection{Column Space and Null Space of a Matrix}
\begin{definition}{Definition: Column Space}
Suppose $A$ is an $m\times n$ matrix with columns $\vv{a_1},\vv{a_2},\hdots,\vv{a_n}$. Then the \textbf{column space} of $A$ is the set:
\begin{align*}
\text{col}A&=\text{span}\(\vv{a_1},\vv{a_2},\hdots,\vv{a_n}\) \\
&=\left\{c_1\vv{a_1}+c_2\vv{a_2}+\hdots+c_n\vv{a_n}\Big|c_1,c_2,\hdots,c_n\in\mathbb{R}\right\} \\
&=\left\{\vv{b}\in\mathbb{R}^m\Big|A\vv{x}=\vv{b}\text{ is consistent}\right\}
\end{align*}
$\star$ Note: col$A$ is a subspace of $\mathbb{R}^m$.
\end{definition}
\begin{example}{Example: Is $\vv{b}$ In col$A$}
$A=\begin{bmatrix}1&2&-1\\2&3&-1\\1&4&-3\end{bmatrix}$, $\vv{b}=\begin{bmatrix}3\\2\\11\end{bmatrix}$
\begin{enumerate}
\item {Is $\vv{b}$ in col$A$? \\ \newline
Firstly solve $A\vv{x}=\vv{b}$:
$$\begin{bmatrix}A&\aug&\vv{b}\end{bmatrix}=\begin{bmatrix}1&2&-1&\aug&3\\2&3&-1&\aug&2\\1&4&-3&\aug&11\end{bmatrix}\xrightarrow{Row\hspace{1ex}Reduce}\begin{bNiceMatrix}
1&0&1&\aug&-5\\
0&1&-1&\aug&4 \\
0&0&0&\aug&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix}$$
Infinitely many solutions to $A\vv{x}=\vv{b}$, thus a solution exists so $\vv{b}$ is in col$A$.}
\item {Find a basis for col$A$.
\begin{align*}
\text{col}A&=\text{span}\(\begin{bmatrix}1\\2\\1\end{bmatrix},\begin{bmatrix}2\\3\\4\end{bmatrix},\begin{bmatrix}-1\\-1\\-3\end{bmatrix}\)=\text{span}\(\begin{bmatrix}1\\2\\1\end{bmatrix},\begin{bmatrix}2\\3\\4\end{bmatrix}\) \\
A&=\begin{bNiceMatrix}[first-row]
\downarrow&\downarrow&\xmark\\
1&2&-1\\
2&3&-1\\
1&4&-3
\end{bNiceMatrix}\xrightarrow{Row\hspace{1ex}Reduce}\begin{bNiceMatrix}[first-row]
\downarrow&\downarrow&\xmark\\
1&0&1\\
0&1&-1 \\
0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix} \\
\mathcal{B}_{colA}&=\left\{\begin{bmatrix}1\\2\\1\end{bmatrix},\begin{bmatrix}2\\3\\4\end{bmatrix}\right\}
\end{align*}
As dim$\(\text{col}A\)=2$ then col$A$ is a 2 dimensional subspace of $\mathbb{R}^3$.}
\end{enumerate}
\end{example}
\begin{theorem}{\text{Theorem: Relating Rank, Dimension, And Column Space}}
Let $A$ be an $m\times n$ matrix. Then:
$$\text{rank}A=\text{dim}\(\text{col}A\)$$
$\star$ Note: If the columns of $A$ span $\mathbb{R}^m$, then $\text{col}A=\mathbb{R}^m$.
\end{theorem}
Elementary Row operations can affect the column space of a matrix.
\begin{example}{Example: Row Operations Affect Column Space}
$A=\begin{bmatrix}1&1\\1&1\end{bmatrix}\xrightarrow{R_2-R_1}\begin{bmatrix}1&1\\0&0\end{bmatrix}=U$ \\
\begin{tblr}{width = \linewidth, colspec={XX}, cells = {halign = c, valign=m}}
{$\begin{aligned}
\text{col}A&=\text{span}\(\begin{bmatrix}1\\1\end{bmatrix}\) \\
\text{col}U&=\text{span}\(\begin{bmatrix}1\\0\end{bmatrix}\) \\
\text{col}A&\ne\text{col}U
\end{aligned}$} &
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\addplot[mark=*, ultra thick] coordinates {(0,0)};
\addplot [
right,
ultra thick,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{x} node[right] {col$A$} ;
\draw[ultra thick, Stealth-Stealth, red] (-1.5,0) -- (1.5,0) node[pos = 0, above]{col$U$};
\end{axis}
\end{tikzpicture}}}
\end{tblr}
\end{example}
\begin{definition}{Definition: Null Space}
We define the \textbf{null space} of an $m\times n$ matrix $A$ to be the set:
$$\text{null}A=\left\{\vv{x}\in\mathbb{R}^n\Big|A\vv{x}=\vv{0}\right\}$$
$\star$ Note: null$A$ is a subspace of $\mathbb{R}^n$.
\end{definition}
\begin{example}{Example: Finding null$A$}
$A=\begin{bmatrix}1&2&3\\4&5&6\end{bmatrix}\xrightarrow{Row\hspace{1ex}Reduce}\begin{bmatrix}1&0&-1\\0&1&2\end{bmatrix}$
Finding a basis for null$A$. \\ \newline
Solve $A\vv{x}=\vv{0}$:
\begin{align*}
x_1-x_3&=0 & x_1&=t \\
x_2+2x_3&=0 & x_2&=-2t \\
&&x_3&=t
\end{align*}
Get Parametric Vector Form of solution set:
$$\vv{x}=\begin{bmatrix}t\\-2t\\t\end{bmatrix}=t\begin{bmatrix}1\\-2\\1\end{bmatrix}$$
The vectors multiplied by the parameters form a basis for null$A$:
$$\mathcal{B}_{\text{null}A}=\left\{\begin{bmatrix}1\\-2\\1\end{bmatrix}\right\}$$
$\text{dim}\(\text{null}A\)=1$, thus null$A$ is a one dimensional subspace of $\mathbb{R}^3$.
\end{example}
\begin{definition}{Definition: Nullity}
The nullity of a matrix $A$ is the dimension of null$A$:
$$\text{nullity}\(A\)=\text{dim}\(\text{null}A\)$$
\end{definition}
\begin{example}{Example: Finding Basis For col$A$ And null$A$ And nullity$A$}
Suppose $A=\begin{bNiceMatrix}[first-row]
\downarrow&\downarrow&&&\downarrow \\
1&0&1&1&0 \\
0&-1&0&-1&0 \\
-1&2&-1&1&1 \\
0&1&0&1&1
\end{bNiceMatrix}$ has RREF $\begin{bNiceMatrix}
1&0&1&1&0 \\
0&1&0&1&0 \\
0&0&0&0&1 \\
0&0&0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-5) circle (2mm) ;
\end{bNiceMatrix}$
Find a basis for col$A$ and a basis for null$A$. Then find rank$A$ and nullity$A$.
$$\mathcal{B}_{\text{col}A}=\left\{\begin{bmatrix}1\\0\\-1\\0\end{bmatrix},\begin{bmatrix}0\\-1\\2\\1\end{bmatrix},\begin{bmatrix}0\\0\\1\\1\end{bmatrix}\right\}$$
Has leading entries in columns 1, 2 and 5, which also means rank$A=3$. \\
\newline
Find null$A$ (solve $A\vv{x}=\vv{0}$):
\begin{align*}
x_1+x_3+x_4&=0 & x_1&=-s-t \\
x_2+x_4&=0 & x_2&=-t \\
x_5&=0 & x_3&=s \\
&& x_4&=t \\
&& x_5&=0
\end{align*}
$$\vv{x}=\begin{bmatrix}-s-t\\-t\\s\\t\\0\end{bmatrix}=s\begin{bmatrix}-1\\0\\1\\0\\0\end{bmatrix}+t\begin{bmatrix}-1\\-1\\0\\1\\0\end{bmatrix}$$
$$\mathcal{B}_{\text{null}A}=\left\{\begin{bmatrix}-1\\0\\1\\0\\0\end{bmatrix},\begin{bmatrix}-1\\-1\\0\\1\\0\end{bmatrix}\right\}$$
nullity$A=2$
\end{example}
$\star$ Note: If null$A=\left\{\vv{0}\right\}$, then $\mathcal{B}_{\text{null}A}=\emptyset$.
\begin{theorem}{Theorem: Rank-Nullity Theorem}
If $A$ is an $m\times n$ matrix, then:
$$\text{rank}A+\text{nullity}A=n$$
\end{theorem}
\begin{theorem}{Theorem: FTIM Continued}
Let $A$ be an $n\times n$ matrix. The following are equivalent: \\ \newline
Original FTIM:
\begin{enumerate}
\item {$A$ is invertible, ($A^{-1}$ exists)}
\item $A$ is row equivalent to $I_n$. ($I_n$ is the RREF of $A$)
\item rank$A=n$
\item $A\vv{x}=\vv{b}$ has a solution for every $\vv{b}\in\mathbb{R}^n$. (more specifically, there is one unique solution $\vv{x}=A^{-1}\vv{b}$.)
\item $A\vv{x}=\vv{0}$ has only the trivial solution $\vv{x}=\vv{0}$.
\item The columns of $A$ span $\mathbb{R}^n$
\item {The columns of $A$ are linearly independent.}
\end{enumerate}
Added:
\begin{enumerate}[resume]
\item {nullity$A=0$}
\item col$A=\mathbb{R}^n$
\item null$A=\left\{\vv{0}\right\}$
\item {The columns of $A$ form a basis for $\mathbb{R}^n$.}
\end{enumerate}
\end{theorem}
\newpage
\section{Section 3.6 Transformations}
\begin{definition}{Definition: Transformation/Function/Mapping}
A \textbf{transformation/function/mapping} $T$ from $\mathbb{R}^n$ to $\mathbb{R}^m$ is a rule that assigns to each vector $\vv{x}\in\mathbb{R}^n$ a vector $T\(\vv{x}\)\in\mathbb{R}^m$.
$$T:\mathbb{R}^n\rightarrow\mathbb{R}^m\qquad\vv{x}\mapsto\underbracket{T\(\vv{x}\)}_{\text{`` $T$ of $\vv{x}$ ''}}$$
$\mathbb{R}^n$ is the \textbf{domain} of $T$. \\ \newline
$\mathbb{R}^m$ is the \textbf{codomain} of $T$. \\ \newline
For $\vv{x}\in\mathbb{R}^n$, $T\(\vv{x}\)\in\mathbb{R}^m$ is called the \textbf{image} of $\vv{x}$.\\ \newline
The set of all images is called the \textbf{range} of $T$.
$$\text{range}\(T\)=\left\{T\(\vv{x}\)\Big|\vv{x}\in\text{domain}\(T\)\right\}$$
\end{definition}
\begin{example}{Example: The Domain And Codomain Of $T$}
\begin{align*}
&T_1\(\vv{x}\)=3\vv{x}& &T_1:\text{ }\mathbb{R}^n\rightarrow \mathbb{R}^n \\
&T_2\(\begin{bmatrix}x\\y\\z\end{bmatrix}\)=\begin{bmatrix}x\\y\end{bmatrix}& &T_2:\text{ }\mathbb{R}^3\rightarrow \mathbb{R}^2
\end{align*}
\end{example}
\begin{example}{Example: Transformation}
\begin{align*}
S\(x_1,\text{ }x_2\)&=\(x_2-3x_1,\text{ }4+x_2,\text{ }9,\text{ }3-x_1\) & &S:\text{ }\mathbb{R}^2\rightarrow \mathbb{R}^4 \\
S\(2,\text{ }1\)&=\(1-3\(2\),\text{ }4+1,\text{ }9,\text{ }3-2\) \\
&=\(-5,\text{ }5,\text{ }9,\text{ }1\)
\end{align*}
\end{example}
\begin{definition}{Definition: Matrix Transformation}
Let $A$ be an $m\times n$ matrix. A transformation $T:\mathbb{R}^n\rightarrow\mathbb{R}^m$ defined by $T\(\vv{x}\)=A\vv{x}$ is called a \textbf{matrix transformation} and $A$ is called the \textbf{standard matrix} of $T$.
\begin{example}{Example: Matrix Transformation}
$T\(\vv{x}\)=\underbrace{\begin{bmatrix}1&3\\2&1\\1&4\end{bmatrix}}_{3\times2}\underbrace{\vv{x}}_{2\times1}$ \\
Domain: $\mathbb{R}^2$ \\ \newline
Codomain: $\mathbb{R}^3$
\end{example}
\end{definition}
\begin{example}{Example: Doing Stuff With Transformations}
Let $A=\begin{bmatrix}1&2&3\\0&1&0\end{bmatrix}$ and $\vv{u}=\begin{bmatrix}0\\1\\-1\end{bmatrix}$. Define $T\(\vv{x}\)=A\vv{x}$.
\begin{enumerate}
\item{Find $T\(\vv{u}\)$
$$T\(\vv{u}\)=A\vv{u}=\begin{bmatrix}1&2&3\\0&1&0\end{bmatrix}\begin{bmatrix}0\\1\\-1\end{bmatrix}=\begin{bmatrix}-1\\1\end{bmatrix}$$}
\item{If $T\(\vv{x}\)=\vv{0}$, Find $\vv{x}$? \\ \newline
Firstly what are we meant to do:
$$T\(\vv{x}\)=A\vv{x}=\begin{bmatrix}1&2&3\\0&1&0\end{bmatrix}\vv{x}=\vv{0}$$
Next Row Reduce:
$$\begin{bmatrix}1&2&3&\aug&0\\0&1&0&\aug&0\end{bmatrix}\xrightarrow{R_1-2R_2}\begin{bmatrix}1&0&3&\aug&0\\0&1&0&\aug&0\end{bmatrix}$$
Solve System of Equations Using Back Substitution:
\begin{align*}
x_1+3x_3&=0 & x_1&=-3t \\
x_2&=0 & x_2&=0 \\
&&x_3&=t
\end{align*}
$\vv{x}=t\begin{bmatrix}-3\\0\\1\end{bmatrix}$
Check: $A\vv{x}=\begin{bmatrix}1&2&3\\0&1&0\end{bmatrix}\begin{bmatrix}-3t\\0\\t\end{bmatrix}=\begin{bmatrix}0\\0\end{bmatrix}\checkmark$}
\item{What is the range of $T$?
\begin{align*}
\text{range}\(T\)&=\left\{T\(\vv{x}\)\Big|\vv{x}\in\text{domain}\(T\)\right\} \\
&=\left\{A\vv{x}\Big|\vv{x}\in\text{domain}\(T\)\right\} \\
&=\left\{\vv{b}\in\mathbb{R}^2\Big|A\vv{x}=\vv{b}\text{ is consistent}\right\} \\
&=\text{col}A \\
&=\text{span}\(\begin{bmatrix}1\\0\end{bmatrix},\begin{bmatrix}2\\1\end{bmatrix},\begin{bmatrix}3\\0\end{bmatrix}\) \\
&=\text{span}\(\begin{bmatrix}1\\0\end{bmatrix},\begin{bmatrix}2\\1\end{bmatrix}\) \\
\Aboxed{\text{range}\(T\)&=\mathbb{R}^2}
\end{align*}
\begin{center}
\begin{tikzpicture}
\fill (0,0) circle[radius=4pt];
\draw[dotted, ultra thick, Stealth-Stealth] (-3,-1.5) -- (3,1.5);
\draw[ultra thick, -Stealth] (0,0) -- (2,1);
\draw[dotted, ultra thick, Stealth-Stealth] (-3,0) -- (3,0);
\draw[ultra thick, -Stealth] (0,0) -- (1,0);
\end{tikzpicture}
\end{center}}
\end{enumerate}
\end{example}
\begin{definition}{Definition: Kernel}
The set of all vectors $\vv{x}$ that $T$ maps to $\vv{0}$ is called the \textbf{kernel} of $T$.
$$\text{ker}T=\left\{\vv{x}\in\text{domain}\(T\)\Big|T\(\vv{x}\)=\vv{0}\right\}$$
\end{definition}
Let $A$ be an $m\times n$ matrix and define $T:\mathbb{R}^n\rightarrow\mathbb{R}^m$ by $T\(\vv{x}\)=A\vv{x}$. Then:
\begin{enumerate}
\item {range$\(T\)=\text{col}A$}
\item {and ker$\(T\)=\text{null}A$}
\end{enumerate}
\subsection{Linear Transformations}
\begin{definition}{Definition: Linear Transformation}
A transformation $T$ is \textbf{linear} if for all $\vv{u}$ and $\vv{v}$ in the domain of $T$ and all $c\in\mathbb{R}$, we have:
\begin{enumerate}
\item $T\(\vv{u}+\vv{v}\)=T\(\vv{u}\)+T\(\vv{v}\)$
\item $T\(c\vv{u}\)=cT\(\vv{u}\)$
\end{enumerate}
1 and 2 can be combined as follows: $T$ is linear provided
$$T\(c\vv{u}+d\vv{v}\)=cT\(\vv{u}\)+dT\(\vv{v}\)$$
for all $\vv{u},\vv{v}\in\text{domain}\(T\)$ and $c,d\in\mathbb{R}$.
\end{definition}
\begin{theorem}{Theorem: All Matrix Transformations Are Linear Transformations}
All Matrix Transformations Are Linear Transformations
\begin{proof}
Let $T$ be a matrix transformation with standard matrix $A$. Then $T\(\vv{x}\)=A\vv{x}$. Let $\vv{u}$ and $\vv{v}$ be vectors in the domain of $T$ and let $c,d\in\mathbb{R}$. Then:
\begin{align*}
T\(c\vv{u}+d\vv{v}\)&=A\(c\vv{u}+d\vv{v}\) \\
&=A\(c\vv{u}\)+A\(d\vv{v}\) \\
&=cA\vv{u}+dA\vv{v} \\
&=cT\(\vv{u}\)+dT\(\vv{v}\)
\end{align*}
Therefore $T$ is linear.
\end{proof}
\end{theorem}
$\star$ Note: If $T$ is linear, then $T\(\vv{0}\)=\vv{0}$. If $T\(\vv{0}\)\ne\vv{0}$, then $T$ is \textbf{not} linear.
\begin{example}{Example: $T$ Is Not Linear As \text{$T\(\vv{0}\)\ne\vv{0}$}}
$T\(x,y,z\)=\(4+x,\hspace{1ex}y+x\)$
$$T\(0,0,0\)=\(4+0,0+0\)=\(4,0\)\ne\(0,0\)$$
Thus as $T\(\vv{0}\)\ne\vv{0}$ then $T$ is not linear.
\end{example}
\begin{example}{Example: $T$ May Or May Not Be Linear}
$T\(x,y\)=\(x^2,y^2\)$
$$T\(0,0\)=\(0^2,0^2\)=\(0,0\)$$
If $T\(\vv{0}\)=\vv{0}$, $T$ may or may not be linear. We need to work with the definition.\\ \newline
Is $T\(c\(x,y\)\)=cT\(x,y\)$ for all $c\in\mathbb{R}$, $\(x,y\)\in\mathbb{R}^2$?
$$T\(c\(x,y\)\)=T\(cx,cy\)=\(c^2x^2,c^2y^2\)$$
$$cT\(x,y\)=c\(x^2,y^2\)=\(cx^2,cy^2\)$$
As $\(c^2x^2,c^2y^2\)\ne\(cx^2,cy^2\)$ then there is a \textbf{Counter Example:}
$$T\(-1\(2,3\)\)=T\(-2,-3\)=\(4,9\)$$
$$-1T\(2,3\)=-1\(4,9\)=\(-4,-9\)$$
Which as $\(4,9\)\ne\(-4,-9\)$ then $T$ is not linear.
\end{example}
\begin{example}{Example: Is $T$ Linear?}
Suppose $T:\mathbb{R}^2\rightarrow\mathbb{R}^3$ is defined by:
$$T\(x,y\)=\(3x,0,x+y\)$$
Is $T$ linear? \\
$\star$ Note: Does $T\(c\vv{u}+d\vv{v}\)=cT\(\vv{u}\)+dT\(\vv{v}\)$? \\ \newline
Let $\vv{u},\vv{v}\in\mathbb{R}^2$ and $c,d\in\mathbb{R}$, then:
$$\vv{u}=\(u_1,u_2\)\qquad\vv{v}=\(v_1,v_2\)$$
that also means that:
$$c\vv{u}+d\vv{v}=\(cu_1+dv_1,cu_2+dv_2\)$$
Now Calculating $T\(c\vv{u}+d\vv{v}\)$:
\begin{align*}
T\(c\vv{u}+d\vv{v}\)&=T\(c\(u_1,u_2\)+d\(v_1,v_2\)\) \\
&=T\(\(cu_1,cu_2\)+\(dv_1,dv_2\)\) \\
&=T\(cu_1+dv_1,cu_2+dv_2\) \\
&=\(3\(cu_1+dv_1\),0,cu_1+dv_1+cu_2+dv_2\) \\
&=\(3cu_1+3dv_1,0,cu_1+dv_1+cu_2+dv_2\)
\end{align*}
Now Calculating $cT\(\vv{u}\)+dT\(\vv{v}\)$:
\begin{align*}
cT\(\vv{u}\)+dT\(\vv{v}\)&=cT\(u_1,u_2\)+dT\(v_1,v_2\) \\
&=c\(3u_1,0,u_1+u_2\)+d\(3v_1,0,v_1+v_2\) \\
&=\(3cu_1,0,cu_1+cu_2\)+\(3dv_1,0,dv_1+dv_2\) \\
&=\(3cu_1+3dv_1,0,cu_1+cu_2+dv_1+dv_2\)
\end{align*}
As $T\(c\vv{u}+d\vv{v}\)=cT\(\vv{u}\)+dT\(\vv{v}\)$ then $T$ is linear.
\end{example}
All linear transformations are matrix transformations: if $T$ is linear, there exists a matrix $A$ such that $T\(\vv{x}\)=A\vv{x}$. ($A$ is the \textbf{standard matrix} of $T$) \\ \newline
How can we find the standard matrix of a linear transformation? \\ \newline
Suppose $T:\mathbb{R}^2\rightarrow\mathbb{R}^4$ is a linear transformation, then for $\vv{x}=\begin{bmatrix}x_1\\x_2\end{bmatrix}\in\mathbb{R}^2$, we have:
\begin{align*}
T\(\vv{x}\)&=T\(\begin{bmatrix}x_1\\x_2\end{bmatrix}\) \\
&=\underbrace{T\(x_1\begin{bmatrix}1\\0\end{bmatrix}+x_2\begin{bmatrix}0\\1\end{bmatrix}\)}_{\text{$T$ is linear}} \\
&=x_1T\(\begin{bmatrix}1\\0\end{bmatrix}\)+x_2T\(\begin{bmatrix}0\\1\end{bmatrix}\) \\
&=x_1T\(\vv{e_1}\)+x_2T\(\vv{e_2}\) \\
&=\fbox{$\begin{bmatrix}T\(\vv{e_1}\)&T\(\vv{e_2}\)\end{bmatrix}$}\begin{bmatrix}x_1\\x_2\end{bmatrix} \\
&=A\vv{x}
\end{align*}
\begin{definition}{Definition: Standard Matrix}
The \textbf{standard matrix} $A$ of a linear transformation $T:\mathbb{R}^n\rightarrow\mathbb{R}^m$ is the $m\times n$ matrix
$$A=\begin{bNiceMatrix}T\(\vv{e_1}\)&T\(\vv{e_2}\)&\Hdotsfor{1}&T\(\vv{e_n}\)\end{bNiceMatrix}$$
where $\left\{\vv{e_1},\vv{e_2},\hdots,\vv{e_n}\right\}$ is the standard basis for $\mathbb{R}^n$. \\ \newline
$\star$ Note: $A$ is unique.
\end{definition}
\begin{example}{Example: Finding The Standard Matrix Of The Previous Example}
$T\(x,y\)=\(3x,0,x+y\)\qquad T:\mathbb{R}^2\rightarrow\mathbb{R}^3$ \\ \newline
We've shown $T$ is linear in the previous example, now find the standard matrix for $T$. \\ \newline
As $A=\begin{bNiceMatrix}T\(\vv{e_1}\)&T\(\vv{e_2}\)&\Hdotsfor{1}&T\(\vv{e_n}\)\end{bNiceMatrix}$ and:
\begin{align*}
T\(1,0\)=\(3,0,1\) \\
T\(0,1\)=\(0,0,1\)
\end{align*}
then $A=\begin{bmatrix}3&0\\0&0\\1&1\end{bmatrix}$ this can also be seen below:
$$T\(\begin{bmatrix}x\\y\end{bmatrix}\)=A\begin{bmatrix}x\\y\end{bmatrix}=\begin{bmatrix}3&0\\0&0\\1&1\end{bmatrix}\begin{bmatrix}x\\y\end{bmatrix}=\begin{bmatrix}3x\\0\\x+y\end{bmatrix}$$
\end{example}
\begin{example}{Example: Finding The Standard Matrix}
$T\(x_1,x_2,x_3\)=\(4x_2,-x_1,x_2-x_1,x_3\)\qquad T:\mathbb{R}^3\rightarrow\mathbb{R}^4$ is linear. Find the standard matrix of $T$
\begin{align*}
T\(1,0,0\)&=\(0,-1,-1,0\) \\
T\(0,1,0\)&=\(4,0,1,0\) \\
T\(0,0,1\)&=\(0,0,0,1\)
\end{align*}
Thus $A=\begin{bmatrix}0&4&0\\-1&0&0\\-1&1&0\\0&0&1\end{bmatrix}$
$$T\(\begin{bmatrix}x_1\\x_2\\x_3\end{bmatrix}\)=\begin{bmatrix}0&4&0\\-1&0&0\\-1&1&0\\0&0&1\end{bmatrix}\begin{bmatrix}x_1\\x_2\\x_3\end{bmatrix}=\begin{bmatrix}4x_2\\-x_1\\-x_1+x_2\\x_3\end{bmatrix}$$
\end{example}
\subsection{Onto and One-to-One}
\begin{definition}{Definition: Onto}
A linear transformation $T:\mathbb{R}^n\rightarrow\textcolor{blue}{\mathbb{R}^m}$ is \textbf{onto} if range$\(T\)=\textcolor{blue}{\mathbb{R}^m}$. \\ \newline
$\star$ Note:
\begin{itemize}
\item If $T\(\vv{x}\)=A\vv{x}$, then \ul{range$\(T\)=\text{col}\(A\)$}, so $T$ is onto if \ul{col$A=\textcolor{blue}{\mathbb{R}^m}$}.
\item $T$ is onto if and only if $T\(\vv{x}\)=$\ul{$A\vv{x}=\vv{b}$} is consistent for every $\vv{b}\in\mathbb{R}^m$.
\end{itemize}
\end{definition}
\begin{definition}{Definition: One-To-One}
A linear transformation $T:\mathbb{R}^n\rightarrow\mathbb{R}^m$ is \textbf{one-to-one} (1-1) if and only if $T\(\vv{u}\)=T\(\vv{v}\)$ implies $\vv{u}=\vv{v}$. (If $\vv{u}\ne\vv{v}$, then $T\(\vv{u}\)\ne T\(\vv{v}\)$ when $T$ is 1-1) \\ \newline
$\star$ Note: If $T\(\vv{x}\)=A\vv{x}$, $T$ is 1-1 if and only if $T\(\vv{x}\)=$\ul{$A\vv{x}=\vv{b}$} has at most one solution for each $\vv{b}$ in $\mathbb{R}^m$.
\end{definition}
\begin{theorem}{Theorem: One-To-One And Kernel Are Related}
$T$ is 1-1 if and only if ker$T=\left\{\vv{0}\right\}$. \\ \newline
$\star$ Note: If $T\(\vv{x}\)=A\vv{x}$, then ker$T=\text{null}A$ so $T$ is $\text{1-1}$ if and only if null$A=\left\{\vv{0}\right\}$.
\end{theorem}
\begin{example}{Example: One-to-One And Onto Using Real-Valued Functions From $\mathbb{R}$ To $\mathbb{R}$}
Notes:
\begin{itemize}
\item $f:\mathbb{R}\rightarrow\mathbb{R}$ is onto if range of $f=\text{ codomain of }f=\mathbb{R}$
\item $f$ is one-to-one if graph of $f$ passes the horizontal line test (no 2 distinct inputs have the same output)
\end{itemize}
\begin{enumerate}
\item {$f\(x\)=-x\qquad f:\mathbb{R}\rightarrow\mathbb{R}$ \\
\begin{tblr}{width = \linewidth, colspec={XX},cells = {halign=l,valign=m}}
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
ticks = none,
clip = false,
]
\addplot [
right,
ultra thick,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{-x};
\end{axis}
\end{tikzpicture}}} &
{Range of $f=\mathbb{R}$ so \textbf{$f$ is onto}. \\ \newline
Graph of $f$ passes the horizontal line test so \textbf{$f$ is 1-1}.}
\end{tblr}}
\item {$f\(x\)=e^x\qquad f:\mathbb{R}\rightarrow\mathbb{R}$ \\
\begin{tblr}{width = \linewidth, colspec={XX},cells = {halign=l,valign=m}}
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
ticks = none,
clip = false,
]
\addplot [
right,
ultra thick,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:0.916290731874,
]
{e^x};
\end{axis}
\end{tikzpicture}}} &
{Range of $f=\(0,\infty\)\ne\mathbb{R}$ so \textbf{$f$ is not onto}. \\ \newline
Graph of $f$ passes the horizontal line test so \textbf{$f$ is 1-1}.}
\end{tblr}}
\item {$f\(x\)=x^3-x\qquad f:\mathbb{R}\rightarrow\mathbb{R}$ \\
\begin{tblr}{width = \linewidth, colspec={XX},cells = {halign=l,valign=m}}
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -1, ymax = 1,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
ticks = none,
clip = false,
]
\addplot [
right,
ultra thick,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -1.3247179572447:1.3247179572447,
]
{x^3-x};
\addplot [
right,
ultra thick,
dotted,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{0.19245008973};
\addplot [
right,
ultra thick,
dotted,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{-0.19245008973};
\addplot[mark=*, ultra thick] coordinates {(-0.88455193089183,0.19245008973)};
\addplot[mark=*, ultra thick] coordinates {(-0.20051164424072,0.19245008973)};
\addplot[mark=*, ultra thick] coordinates {(1.0850635751325,0.19245008973)};
\addplot[mark=*, ultra thick] coordinates {(-1.0850635751325,-0.19245008973)};
\addplot[mark=*, ultra thick] coordinates {(0.20051164424072,-0.19245008973)};
\addplot[mark=*, ultra thick] coordinates {(0.88455193089183,-0.19245008973)};
\addplot[mark=*, ultra thick] coordinates {(-1,0)};
\addplot[mark=*, ultra thick] coordinates {(1,0)};
\end{axis}
\end{tikzpicture}}} &
{Range of $f=\mathbb{R}$ so \textbf{$f$ is onto}. \\ \newline
Graph of $f$ fails the horizontal line test so \textbf{$f$ is not 1-1} \\
ex: $f\(-1\)=f\(1\)=0$ and $-1\ne1$.}
\end{tblr}}\item {$f\(x\)=x^2\qquad f:\mathbb{R}\rightarrow\mathbb{R}$ \\
\begin{tblr}{width = \linewidth, colspec={XX},cells = {halign=l,valign=m}}
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
ticks = none,
clip = false,
]
\addplot [
right,
ultra thick,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -1.58113883008:1.58113883008,
]
{x^2};
\addplot [
right,
ultra thick,
dotted,
samples = 200,
style = {Stealth-Stealth, thick},
domain = -2.5:2.5,
]
{2};
\addplot[mark=*, ultra thick] coordinates {(-1.41421356237,2)};
\addplot[mark=*, ultra thick] coordinates {(1.41421356237,2)};
\end{axis}
\end{tikzpicture}}} &
{Range of $f=\left[0,\infty\right)\ne\mathbb{R}$ so \textbf{$f$ is not onto}. \\ \newline
Graph of $f$ fails the horizontal line test so \textbf{$f$ is not 1-1} \\
ex: $f\(-2\)=f\(2\)=4$ and $-2\ne2$.}
\end{tblr}}
\end{enumerate}
\end{example}
\begin{example}{Example: One-to-One And Onto Using Linear Transformations \text{$T:\mathbb{R}^n\rightarrow\mathbb{R}^m$}}
\textbf{Recall: }If $A$ is the standard matrix of $T$, then
\begin{enumerate}
\item range$\(T\)=\text{col}\(A\)$.
\item ker$\(T\)=\text{null}\(A\)$.
\end{enumerate}
If $T\(\vv{x}\)=A\vv{x}$, then $T$ is \textbf{onto} if $\text{range}\(T\)=\text{codomain}\(T\)$, which also means $\text{col}\(A\)=\text{codomain}\(T\)$. \\ \newline
If $T\(\vv{x}\)=A\vv{x}$ then $T$ is 1-1 if $\text{ker}\(T\)=\left\{\vv{0}\right\}$, which also means $\text{null}\(A\)=\left\{\vv{0}\right\}$.
\begin{enumerate}
\item{$T:\mathbb{R}^2\rightarrow\mathbb{R}^3\qquad T\(\vv{x}\)=\begin{bmatrix}1&2\\0&3\\0&9\end{bmatrix}\vv{x}$ \\
$$A=\begin{bmatrix}1&2\\0&3\\0&9\end{bmatrix}\xrightarrow{R_3-3R_2}\begin{bNiceMatrix}
1&2\\
0&3\\
0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix}$$
\begin{itemize}
\item {rank$A=2$ so col$A$ is a 2 dimensional subspace of $\mathbb{R}^3$ thus col$A\ne\mathbb{R}^3$ making range$\(T\)\ne\mathbb{R}^3$ so \textbf{$T$ is not onto.}}
\item {nullity$A=0$ so null$A=\left\{\vv{0}\right\}$ thus ker$T=\left\{\vv{0}\right\}$ so \textbf{$T$ is 1-1}.}
\end{itemize}}
\item{$T:\mathbb{R}^3\rightarrow\mathbb{R}^2\qquad T\(\vv{x}\)=\begin{bmatrix}1&2&3\\0&4&5\end{bmatrix}\vv{x}$ \\
$$A=\begin{bNiceMatrix}
1&2&3\\
0&4&5
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix}$$
\begin{itemize}
\item {rank$A=2$ so col$A$ is a 2 dimensional subspace of $\mathbb{R}^2$ thus col$A=\mathbb{R}^2$ making range$\(T\)=\mathbb{R}^2$ so \textbf{$T$ is onto.}}
\item {nullity$A=1$ so null$A\ne\left\{\vv{0}\right\}$ thus ker$T\ne\left\{\vv{0}\right\}$ so \textbf{$T$ is not 1-1}.}
\end{itemize}}
\item{$T:\mathbb{R}^2\rightarrow\mathbb{R}^2\qquad T\(\vv{x}\)=\begin{bmatrix}1&1\\1&1\end{bmatrix}\vv{x}$ \\
$$A=\begin{bmatrix}1&1\\1&1\end{bmatrix}\xrightarrow{R_2-R_1}\begin{bNiceMatrix}
1&1\\
0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}$$
\begin{itemize}
\item {rank$A=1$ so col$A$ is a 1 dimensional subspace of $\mathbb{R}^2$ thus col$A\ne\mathbb{R}^2$ making range$\(T\)\ne\mathbb{R}^2$ so \textbf{$T$ is not onto.}}
\item {nullity$A=1$ so null$A\ne\left\{\vv{0}\right\}$ thus ker$T\ne\left\{\vv{0}\right\}$ so \textbf{$T$ is not 1-1}.}
\end{itemize}}
\item{$T:\mathbb{R}^2\rightarrow\mathbb{R}^2\qquad T\(\vv{x}\)=\begin{bmatrix}1&2\\3&4\end{bmatrix}\vv{x}$ \\
$$A=\begin{bmatrix}1&2\\3&4\end{bmatrix}\xrightarrow{R_2-3R_1}\begin{bNiceMatrix}
1&2\\
0&-2
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (3mm) ;
\end{bNiceMatrix}$$
\begin{itemize}
\item {rank$A=2$ so col$A$ is a 2 dimensional subspace of $\mathbb{R}^2$ thus col$A=\mathbb{R}^2$ making range$\(T\)=\mathbb{R}^2$ so \textbf{$T$ is onto.}}
\item {nullity$A=0$ so null$A=\left\{\vv{0}\right\}$ thus ker$T=\left\{\vv{0}\right\}$ so \textbf{$T$ is 1-1}.}
\end{itemize}}
\end{enumerate}
\end{example}
\begin{example}{Example: One-to-One and Onto Using Schematic Diagrams}
In the examples below, the domain and codomain of the transformation $T$ will be finite sets.
\begin{itemize}
\item $T$ is \textbf{onto} if range$\(T\)=\text{codomain}\(T\)$
\item $T$ is 1-1 if $T\(\vv{u}\)=T\(\vv{v}\)$ implies $\vv{u}=\vv{v}$ (in other words, if $\vv{u}\ne\vv{v}$ then $T\(\vv{u}\)\ne T\(\vv{v}\)$ when $T$ is 1-1).
\end{itemize}
\begin{enumerate}
\item {$T:A\rightarrow B$ \\
\begin{tblr}{width = \linewidth, colspec = {XX}, cells = {halign = c, valign = m}}
{$\begin{aligned}
A&=\left\{ a_1,a_2,a_3\right\} \\
B&=\left\{ b_1,b_2,b_3,b_4\right\}
\end{aligned}$} &
{\raisebox{-.5\height}{\resizebox{0.5\linewidth}{!}{\begin{tikzpicture}
%Draws A points
\foreach[count=\i] \j in {0, -0.75, -1.5} {
\node[minimum width=1em, fill = black, draw, circle, inner sep=0pt,minimum size=4pt, label = {[left, name = l-\i, text height=1.4ex]$a_\i$}] (A-\i) at (-1,\j cm) {};
}
\node[ellipse, draw, fit=(A-1) (A-2) (A-3) (l-1), label={[name=A]above:$A$}] {};
%Draws B points
\foreach[count=\i] \j in {0, -0.5,..., -1.5} {
\node[minimum width=1em, fill = black, draw, circle, inner sep=0pt,minimum size=4pt, label = {[right, name = l-\i, text height=1.4ex]$b_\i$}] (B-\i) at (1,\j cm) {};
}
\node[ellipse, draw, fit=(B-1) (B-2) (B-3) (B-4) (l-1), label={[name=B]above:$B$}] {};
\draw[-Stealth, ultra thick] (A) -- node[above]{$T$}(B);
\draw[-Stealth, ultra thick] (A-1) -- (B-2);
\draw[-Stealth, ultra thick] (A-2) -- (B-3);
\draw[-Stealth, ultra thick] (A-3) -- (B-1);
\end{tikzpicture}}}}
\end{tblr}
\textbf{T is not onto} ($b_4$ is in codomain but not in range$\(T\)$) \\ \newline
\textbf{T is 1-1} ($a_1,a_2,a_3$ have distinct images)}
\item {$T:A\rightarrow B$ \\
\begin{tblr}{width = \linewidth, colspec = {XX}, cells = {halign = c, valign = m}}
{$\begin{aligned}
A&=\left\{ a_1,a_2,a_3\right\} \\
B&=\left\{ b_1,b_2\right\}
\end{aligned}$} &
{\raisebox{-.5\height}{\resizebox{0.5\linewidth}{!}{\begin{tikzpicture}
%Draws A points
\foreach[count=\i] \j in {0,-0.5,...,-1} {
\node[minimum width=1em, fill = black, draw, circle, inner sep=0pt,minimum size=4pt, label = {[left, name = l-\i, text height=1.4ex]$a_\i$}] (A-\i) at (-1,\j cm) {};
}
\node[ellipse, draw, fit=(A-1) (A-2) (A-3) (l-1), label={[name=A]above:$A$}] {};
%Draws B points
\foreach[count=\i] \j in {0, -1} {
\node[minimum width=1em, fill = black, draw, circle, inner sep=0pt,minimum size=4pt, label = {[right, name = l-\i, text height=1.4ex]$b_\i$}] (B-\i) at (1,\j cm) {};
}
\node[ellipse, draw, fit=(B-1) (B-2) (l-1), label={[name=B]above:$B$}] {};
\draw[-Stealth, ultra thick] (A) -- node[above]{$T$}(B);
\draw[-Stealth, ultra thick] (A-1) -- (B-2);
\draw[-Stealth, ultra thick] (A-2) -- (B-1);
\draw[-Stealth, ultra thick] (A-3) -- (B-1);
\end{tikzpicture}}}}
\end{tblr}
\textbf{T is onto} (range$T=B$) \\ \newline
\textbf{T is not 1-1} ($T\(a_2\)=T\(a_3\)$ and $a_2\ne a_3$)}
\item {$T:A\rightarrow B$ \\
\begin{tblr}{width = \linewidth, colspec = {XX}, cells = {halign = c, valign = m}}
{$\begin{aligned}
A&=\left\{ a_1,a_2\right\} \\
B&=\left\{ b_1,b_2\right\}
\end{aligned}$} &
{\raisebox{-.5\height}{\resizebox{0.5\linewidth}{!}{\begin{tikzpicture}
%Draws A points
\foreach[count=\i] \j in {0,-0.5} {
\node[minimum width=1em, fill = black, draw, circle, inner sep=0pt,minimum size=4pt, label = {[left, name = l-\i, text height=1.4ex]$a_\i$}] (A-\i) at (-1,\j cm) {};
}
\node[ellipse, draw, fit=(A-1) (A-2) (l-1), label={[name=A]above:$A$}] {};
%Draws B points
\foreach[count=\i] \j in {0, -0.5} {
\node[minimum width=1em, fill = black, draw, circle, inner sep=0pt,minimum size=4pt, label = {[right, name = l-\i, text height=1.4ex]$b_\i$}] (B-\i) at (1,\j cm) {};
}
\node[ellipse, draw, fit=(B-1) (B-2) (l-1), label={[name=B]above:$B$}] {};
\draw[-Stealth, ultra thick] (A) -- node[above]{$T$}(B);
\draw[-Stealth, ultra thick] (A-1) -- (B-2);
\draw[-Stealth, ultra thick] (A-2) -- (B-1);
\end{tikzpicture}}}}
\end{tblr}
\textbf{T is onto} (range$T=B$) \\ \newline
\textbf{T is 1-1} ($a_1,a_2$ have distinct images)}
\item {$T:A\rightarrow B$ \\
\begin{tblr}{width = \linewidth, colspec = {XX}, cells = {halign = c, valign = m}}
{$\begin{aligned}
A&=\left\{ a_1,a_2\right\} \\
B&=\left\{ b_1,b_2\right\}
\end{aligned}$} &
{\raisebox{-.5\height}{\resizebox{0.5\linewidth}{!}{\begin{tikzpicture}
%Draws A points
\foreach[count=\i] \j in {0,-0.5} {
\node[minimum width=1em, fill = black, draw, circle, inner sep=0pt,minimum size=4pt, label = {[left, name = l-\i, text height=1.4ex]$a_\i$}] (A-\i) at (-1,\j cm) {};
}
\node[ellipse, draw, fit=(A-1) (A-2) (l-1), label={[name=A]above:$A$}] {};
%Draws B points
\foreach[count=\i] \j in {0, -0.5} {
\node[minimum width=1em, fill = black, draw, circle, inner sep=0pt,minimum size=4pt, label = {[right, name = l-\i, text height=1.4ex]$b_\i$}] (B-\i) at (1,\j cm) {};
}
\node[ellipse, draw, fit=(B-1) (B-2) (l-1), label={[name=B]above:$B$}] {};
\draw[-Stealth, ultra thick] (A) -- node[above]{$T$}(B);
\draw[-Stealth, ultra thick] (A-1) -- (B-2);
\draw[-Stealth, ultra thick] (A-2) -- (B-2);
\end{tikzpicture}}}}
\end{tblr}
\textbf{T is not onto} ($b_1$ is in $B$ but not in range$T$) \\ \newline
\textbf{T is not 1-1} ($T\(a_1\)=T\(a_2\)$ and $a_1\ne a_2$)}
\end{enumerate}
\end{example}
\begin{example}{Example: Is A Matrix Onto And 1-1}
$T:\mathbb{R}^2\rightarrow\mathbb{R}^4\qquad T\(\vv{x}\)=\underbrace{\begin{bmatrix}1&0\\0&1\\-1&2\\0&1\end{bmatrix}}_{A}\vv{x}$
$$A\xrightarrow{Row\text{ }Reduce}\underbrace{\begin{bNiceMatrix}
1&0\\
0&1\\
0&0\\
0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix}}_{\text{rank}A=2}$$
\begin{enumerate}
\item {Is $T$ onto? \textbf{No} \\Is range$T=\mathbb{R}^4$? \textbf{No} \\ Is col$A=\mathbb{R}^4$? \textbf{No} \\ col$A$ is a 2-dimensional subspace of $\mathbb{R}^4$ thus col$A\ne\mathbb{R}^4$.}
\item {Is $T$ 1-1? \textbf{Yes}\\Is ker$T=\left\{\vv{0}\right\}$? \textbf{Yes}\\ Is null$A=\left\{\vv{0}\right\}$? \textbf{Yes}, $A\vv{x}=\vv{0}$ has only the trivial solution as \\ rank$A=\text{ number of columns}$.}
\end{enumerate}
\end{example}
\subsection{Composition and Inverses of Linear Transformations}
\begin{definition}{Definition: Composition}
Let $S:\textcolor{blue}{\mathbb{R}^n}\rightarrow\mathbb{R}^m$ and $T:\mathbb{R}^p\rightarrow\textcolor{blue}{\mathbb{R}^n}$ be linear transformations. Then the \textbf{composition} of $S$ and $T$, denoted $S\circ T$, is defined as:
$$\(S\circ T\)\(\vv{x}\)=S\(T\(x\)\)$$
If $S\(\vv{x}\)=A\vv{x}$ and $T\(\vv{x}\)=B\vv{x}$ then $\(S\circ T\)\(\vv{x}\)=S\(T\(\vv{x}\)\)=S\(B\vv{x}\)=AB\vv{x}$. Thus $S\circ T$ is a linear transformation with standard matrix $AB$.
\end{definition}
\begin{example}{Example: Find A \text{$\vv{x}$} Of A Composite Transformation}
$S\(\vv{x}\)=\begin{bmatrix}1&0\\0&1\\1&-1\end{bmatrix}\vv{x}\text{ and }T\(\vv{x}\)=\begin{bmatrix}1&2\\2&-1\end{bmatrix}\vv{x}$. Find $\(S\circ T\)\(\begin{bmatrix}1\\1\end{bmatrix}\)$.
\begin{align*}
\(S\circ T\)\(\begin{bmatrix}1\\1\end{bmatrix}\)&=S\(T\(\begin{bmatrix}1\\1\end{bmatrix}\)\) \\
&=S\(\begin{bmatrix}1&2\\2&-1\end{bmatrix}\begin{bmatrix}1\\1\end{bmatrix}\) \\
&=S\(\begin{bmatrix}3\\1\end{bmatrix}\) \\
&=\begin{bmatrix}1&0\\0&1\\1&-1\end{bmatrix}\begin{bmatrix}3\\1\end{bmatrix} \\
&=\begin{bmatrix}3\\1\\2\end{bmatrix}
\end{align*}
$S:\textcolor{red}{\mathbb{R}^2}\rightarrow\mathbb{R}^3$ \\
$T:\mathbb{R}^2\rightarrow\textcolor{red}{\mathbb{R}^2}$ \\
$\star$ Note: $T\circ S$ is undefined.
\end{example}
\begin{definition}{Definition: Invertible}
A linear transformation $T:\mathbb{R}^n\rightarrow\mathbb{R}^n$ is \textbf{invertible} if there is a linear transformation \\ $S:\mathbb{R}^n\rightarrow\mathbb{R}^n$ such that:
$$\(S\circ T\)\(\vv{x}\)=\vv{x}\text{ and }\(T\circ S\)\(\vv{x}\)=\vv{x}$$
Then $S=T^{-1}$.
\end{definition}
\begin{theorem}{Theorem: $T$ Is Invertible If $A$ Is Invertible}
Let $T$ be a linear transformation $\(T:\mathbb{R}^n\rightarrow\mathbb{R}^n\)$ with standard matrix $A$ $\(T\(\vv{x}\)=A\vv{x}\)$. Then $T$ is invertible if and only if $A$ is invertible. Then $T^{-1}\(\vv{x}\)=A^{-1}\vv{x}$.
$$\(T\circ T^{-1}\)\(\vv{x}\)=T\(T^{-1}\(\vv{x}\)\)=T\(A^{-1}\vv{x}\)=AA^{-1}\vv{x}=\vv{x}$$
$$\(T^{-1}\circ T\)\(\vv{x}\)=T^{-1}\(T\(\vv{x}\)\)=T^{-1}\(A\vv{x}\)=A^{-1}A\vv{x}=\vv{x}$$
\end{theorem}
\begin{example}{Example: Is An Invertible Linear Transformation One-To-One And Onto}
Let $T:\mathbb{R}^3\rightarrow\mathbb{R}^3$ be the linear transformation with standard matrix $A=\begin{bmatrix}2&4&2\\0&1&0\\4&0&8\end{bmatrix}$.
\begin{enumerate}
\item {Is $T$ invertible?
$$A=\begin{bmatrix}2&4&2\\0&1&0\\4&0&8\end{bmatrix}\xrightarrow{R_3-2R_1}\begin{bmatrix}2&4&2\\0&1&0\\0&-8&4\end{bmatrix}\xrightarrow{R_3+8R_2}\begin{bNiceMatrix}
2&4&2\\
0&1&0\\
0&0&4
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\tikz \draw (3-3) circle (2mm) ;
\end{bNiceMatrix}$$
$A$ is $3\times3$ and rank$A=3$ thus $A^{-1}$ exists so \textbf{$T$ is invertible}. rank$\(A\)=3=n$ thus by FTIM $A$ is invertible making $T$ also invertible.}
\item {Is $T$ onto? \\ \newline
rank$A=3$ and $A$ is $3\times3$ thus col$A$ is a 3-dimensional subspace of $\mathbb{R}^3$ so col$A=\mathbb{R}^3$ making range$\(T\)=\mathbb{R}^3=\text{ codomain}\(T\)$ thus \textbf{$T$ is onto}.}
\item {Is $T$ one-to-one? \\ \newline
nullity$\(A\)=0$ so null$A=\left\{\vv{0}\right\}$ making ker$T=\left\{\vv{0}\right\}$ thus \textbf{$T$ is one-to-one.}}
\end{enumerate}
\end{example}
\begin{theorem}{If A Transformation Is Invertible Then It Is Onto And One-To-One}
Let $T$ be a linear transformation. Then \textbf{$T$ is invertible} if and only if \textbf{$T$ is both 1-1 and onto}.
\end{theorem}
\begin{example}{Example: Find A Linear Transformation For A Graphic Transformation}
Let $T:\mathbb{R}^2\rightarrow\mathbb{R}^2$ be a linear transformation that rotates a vector $90\degree$ clockwise about the origin and then scales it by a factor of 2. \\
\begin{tblr}{width = \linewidth,colspec={XX},cells={halign=c,valign=m}}
{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\draw[ultra thick,-Stealth, blue] (0,0) -- (1,0) node[above] {$\begin{bmatrix}1\\0\end{bmatrix}$};
\draw[ultra thick, -Stealth,red] (0,0) -- (0,-2) node[above right] {$\begin{bmatrix}0\\-2\end{bmatrix}$};
\end{axis}
\end{tikzpicture}}
&
{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\draw[ultra thick,-Stealth, blue] (0,0) -- (0,1) node[right] {$\begin{bmatrix}0\\1\end{bmatrix}$};
\draw[ultra thick, -Stealth,red] (0,0) -- (2,0) node[above] {$\begin{bmatrix}2\\0\end{bmatrix}$};
\end{axis}
\end{tikzpicture}} \\
{$T\(\begin{bmatrix}1\\0\end{bmatrix}\)=\begin{bmatrix}0\\-2\end{bmatrix}$} & {$T\(\begin{bmatrix}0\\1\end{bmatrix}\)=\begin{bmatrix}2\\0\end{bmatrix}$}
\end{tblr}
\begin{enumerate}
\item {Find the standard matrix of $T$.
$$T\(\vv{x}\)=A\vv{x}=\begin{bmatrix}0&2\\-2&0\end{bmatrix}\vv{x}$$}
\item {Find the standard matrix of $T^{-1}$.
$$T^{-1}\(\vv{x}\)=A^{-1}\vv{x}=\frac{1}{0+4}\begin{bmatrix}0&-2\\2&0\end{bmatrix}\vv{x}=\begin{bmatrix}0&-1/2\\1/2&0\end{bmatrix}\vv{x}$$}
\end{enumerate}
\end{example}
\newpage
\section{Section 3.7 Markov Chains}
\begin{example}{Example: Population Example Used Throughout Section}
Each year, the population of a city and its suburbs moves as follows: $20\%$ of the city's population moves to suburbs and $10\%$ of the suburb's population moves to the city.
\begin{enumerate}
\item {If $c_0=\text{ initial city population}$ and $s_0=\text{ initial suburb population}$. Find $c_1$ and $s_1$ (the populations in each after one year)
\begin{center}
\resizebox{0.5\linewidth}{!}{\begin{tikzpicture}
\node[ellipse, draw] (c) at (0,0) {City};
\node (s) [ellipse, draw, right=of c] {Suburbs};
\path[every node/.style={font=\sffamily\small}, every loop/.style={}]
(c) edge[bend left, -Stealth] node[pos = 0.5, above] {$20\%$} (s)
(s) edge[bend left, -Stealth] node[pos = 0.5, below] {$10\%$} (c)
(c) edge[loop left, Stealth-] node[pos = 0.8, above] {$80\%$} ()
(s) edge[loop right, -Stealth] node[pos = 0.2, above] {$90\%$} ();
\end{tikzpicture}}
\end{center}
\begin{align*}
c_1&=0.8c_0+0.1s_0 \\
s_1&=0.9s_0+0.2c_0 \\
\begin{bmatrix}c_1\\s_1\end{bmatrix}&=\begin{bmatrix}0.8&0.1\\0.2&0.9\end{bmatrix}\begin{bmatrix}c_0\\s_0\end{bmatrix}
\end{align*}}
\item {How can we find $c_2$ and $s_2$ (the populations in each after 2 years)?
\begin{align*}
c_2&=0.8c_1+0.1s_1 \\
s_2&=0.2c_1+0.9s_1 \\
\begin{bmatrix}c_2\\s_2\end{bmatrix}&=\begin{bmatrix}0.8&0.1\\0.2&0.9\end{bmatrix}\begin{bmatrix}c_1\\s_1\end{bmatrix}
\end{align*}}
\end{enumerate}
\end{example}
Often the population can be divided into various groups called \textbf{states}.
\begin{example}{Example: States}
City and Suburbs
\end{example}
We can use \textbf{state vectors} $\vv{x_0},\vv{x_1},\vv{x_2},\hdots,\vv{x_k},\hdots$ to show how the population is divided at a specific point in time.
\begin{example}{Example: State Vectors}
$$\vv{x_0}=\begin{bmatrix}c_0\\s_0\end{bmatrix},\text{ }\vv{x_1}=\begin{bmatrix}c_1\\s_1\end{bmatrix},\text{ }\vv{x_2}=\begin{bmatrix}c_2\\s_2\end{bmatrix}$$
\end{example}
We use \textbf{transition matrix} $P$ to go from one state vector to the next in the sequence of state vectors.
$$\vv{x_0}\xrightarrow{P}\vv{x_1}\xrightarrow{P}\vv{x_2},\hdots,\vv{x_k}\xrightarrow{P}\vv{x}_{k+1}$$
The sequence of state vectors is called a \textbf{Markov chain}. \\
$\vv{x_0}$ is called the \textbf{initial state vector}.
\begin{example}{Example: Initial State Vector}
$$P=\begin{bmatrix}0.8&0.1\\0.2&0.9\end{bmatrix}\qquad\vv{x_0}=\begin{bmatrix}c_0\\s_0\end{bmatrix}$$
\end{example}
The transition matrix $P$ is a \textbf{stochastic matrix}: a stochastic matrix is an $n\times n$ matrix with nonnegative entries where the entries in each column sum to one.
\begin{align*}
\vv{x_1}&=P\vv{x_0} \\
\vv{x_2}&=P\vv{x_1}=P\(P\vv{x_0}\)=P^2\vv{x_0} \\
\vv{x_3}&=P\vv{x_2}=P\(P^2\vv{x_0}\)=P^3\vv{x_0} \\
&\vdots \\
\vv{x_k}&=P\vv{x}_{k-1}=P^k\vv{x_0}
\end{align*}
\begin{example}{Example: Moving From One State To Another State}
$P=\begin{bmatrix}0.8&0.1\\0.2&0.9\end{bmatrix}$ \\
City: State 1 \\
Suburbs: State 2 \\
$P_{21}=0.2$ is the probability of moving from State 1 to State 2 in one step.
\end{example}
The probability of moving from State $j$ to State $i$ in one step is $P_{ij}$. (The entries in the transition matrix are the transition probabilities.)
\begin{example}{Example: Moving From One State To Another In More Than One Step}
What is the probability of moving from the city to the suburbs in 2 years? \\ \newline
Way 1:
\begin{center}
\resizebox{0.5\linewidth}{!}{\begin{tikzpicture}[level distance = 0.4cm,
arrow/.style={edge from parent/.style={draw,-latex}},
level 2/.style = {level distance = 3cm},
level 3/.style={sibling distance=0.8cm}]
\node {City}
child[grow=right] {
child[arrow] {node {Suburbs}
child[arrow] {node[draw, ellipse]{Suburbs} edge from parent node[below, pos = 0.5] {0.9}}
child[arrow] {node{City} edge from parent node[above, pos = 0.5] {0.1}}
edge from parent node[below, pos = 0.5] {0.2}}
child[arrow] {node {City}
child[arrow] {node[draw, ellipse]{Suburbs} edge from parent node[below, pos = 0.5] {0.2}}
child[arrow] {node{City} edge from parent node[above, pos = 0.5] {0.8}}
edge from parent node[above, pos = 0.5] {0.8}}
};
\end{tikzpicture}}
\end{center}
$$\(0.8\)\(0.2\)+\(0.2\)\(0.9\)=0.34$$
Way 2:
$$P^2=\begin{bmatrix}0.8&0.1\\0.2&0.9\end{bmatrix}\begin{bmatrix}0.8&0.1\\0.2&0.9\end{bmatrix}=\begin{bNiceMatrix}[first-row, first-col]
&\downarrow&\\
&0.66&0.17\\
\rightarrow&0.34&0.83
\end{bNiceMatrix}=Q$$
The entries in $P^2$ are the transition probabilities where the time step is 2 years.
$$Q_{21}=0.34=34\%$$
\end{example}
The probability of moving from State $j$ to State $i$ in $k$ steps is $\(P^k\)_{ij}$.
\begin{example}{Example: Using An Initial State Vector}
If $60\%$ of the population currently lives in the city, what percentage will live in the city after one year? After 2 years? $\(\text{Recall: }P=\begin{bmatrix}0.8&0.1\\0.2&0.9\end{bmatrix}\)$ \\ \newline
State 1: City \\
State 2: Suburbs \\ \newline
$\vv{x_0}=\begin{bmatrix}0.6\\0.4\end{bmatrix}$
$$\vv{x_1}=P\vv{x_0}=\begin{bmatrix}0.8&0.1\\0.2&0.9\end{bmatrix}\begin{bmatrix}0.6\\0.4\end{bmatrix}=\begin{bmatrix}0.48+0.04\\0.12+0.36\end{bmatrix}=\begin{bmatrix}0.52\\0.48\end{bmatrix}$$
$52\%$ of the population is in the city after one year.
$$\vv{x_2}=P\vv{x_1}=P^2\vv{x_0}=\begin{bmatrix}0.8&0.1\\0.2&0.9\end{bmatrix}\begin{bmatrix}0.52\\0.48\end{bmatrix}=\begin{bmatrix}0.464\\0.536\end{bmatrix}$$
$46.4\%$ in the city after 2 years.
\end{example}
State vectors either contain population numbers or proportions. If they contain proportions, the state vectors are \textbf{probability vectors}: a vector with nonnegative entries that sum to one.
$$\vv{x_0},\vv{x_1},\vv{x_2},\hdots,\vv{x_k},\vv{x}_{k+1},\hdots\rightarrow\vv{x}$$
Does a Markov chain converge? Yes, when $P$ is a \textbf{regular matrix}: an $n\times n$ matrix where some power of $P$ contains all positive entries.
\begin{example}{Example: Getting A Taste Of A Convergent Markov Chain}
Consider $P=\begin{bmatrix}1/2&1/4\\1/2&3/4\end{bmatrix}$ and $\vv{x_0}=\begin{bmatrix}12\\12\end{bmatrix}$. \\ \newline
Given $\vv{x_1}=\begin{bmatrix}9\\15\end{bmatrix}$, $\vv{x_2}=\begin{bmatrix}8.25\\15.75\end{bmatrix}$, and $\vv{x_3}=\begin{bmatrix}8.0625\\15.9375\end{bmatrix}$, what might we suspect happens to $\vv{x_k}$ as $k\rightarrow\infty$? \\ \newline
It looks like $\vv{x_k}\rightarrow\begin{bmatrix}8\\16\end{bmatrix}$ as $k\rightarrow\infty$. \\ \newline
What is $P\begin{bmatrix}8\\16\end{bmatrix}$?
$$P\begin{bmatrix}8\\16\end{bmatrix}=\begin{bmatrix}1/2&1/4\\1/2&3/4\end{bmatrix}\begin{bmatrix}8\\16\end{bmatrix}=\begin{bmatrix}4+4\\4+12\end{bmatrix}=\begin{bmatrix}8\\16\end{bmatrix}$$
$$\vv{x_0},\vv{x_1},\vv{x_2},\vv{x_3},\hdots,\vv{x_k},\vv{x}_{k+1},\hdots,\begin{bmatrix}8\\16\end{bmatrix},\begin{bmatrix}8\\16\end{bmatrix},\begin{bmatrix}8\\16\end{bmatrix},\hdots$$
\end{example}
\begin{definition}{Definition: Steady State Vector}
A \textbf{steady state vector} for a transition matrix $P$ is a vector $\vv{x}$ such that $P\vv{x}=\vv{x}$. \\ \newline
The \textbf{steady state probability vector} for $P$ is a probability vector $\vv{x}$ such that $P\vv{x}=\vv{x}$.
\end{definition}
How can we find steady state vectors? \\ \newline
We want
\begin{align*}
P\vv{x}&=\vv{x} \\
P\vv{x}-\vv{x}&=0 \\
\(P-I_n\)\vv{x}&=0
\end{align*}
This is a homogeneous linear system, which means that null$\(P-I_n\)$ contains the steady state vectors of $P$.
\begin{example}{Example: Finding the Steady State Vector}
$P=\begin{bmatrix}1/2&1/4\\1/2&3/4\end{bmatrix}$ and $\vv{x_0}=\begin{bmatrix}12\\12\end{bmatrix}$ \\ \newline
Find the steady state vector of $P$ by solving $\(P-I_2\)\vv{x}=0$.
$$P-I_2=\begin{bmatrix}\frac{1}{2}-1&\frac{1}{4}\\\frac{1}{2}&\frac{3}{4}-1\end{bmatrix}=\begin{bmatrix}-1/2&1/4\\1/2&-1/4\end{bmatrix}$$
Row Reduce:
$$\begin{bmatrix}-1/2&1/4\\1/2&-1/4\end{bmatrix}\xrightarrow{R_2+R_1}\begin{bmatrix}-1/2&1/4\\0&0\end{bmatrix}\xrightarrow{-2R_1}\begin{bmatrix}1&-1/2\\0&0\end{bmatrix}$$
Solve System Of Equations:
\begin{align*}
x_1-\frac{1}{2}x_2&=0 & x_1&=\frac{1}{2}t \\
0&=0 & x_2&=t
\end{align*}
$$\vv{x}=\begin{bmatrix}\frac{1}{2}t\\t\end{bmatrix}=t\begin{bmatrix}1/2\\1\end{bmatrix}$$
If $\vv{x_0}=\begin{bmatrix}12\\12\end{bmatrix}$, then total population is 24. Thus we would need $x_1+x_2=24$.
\begin{align*}
\frac{1}{2}t+t &= 24 \\
\frac{3}{2}t&= 24 \\
t &= 16
\end{align*}
Putting it all together
$$\vv{x}=\begin{bmatrix}\frac{1}{2}\(16\)\\16\end{bmatrix}=\begin{bmatrix}8\\16\end{bmatrix}$$
\end{example}
\begin{example}{Example: Finding The Steady State Probability Vector}
Let $P=\begin{bmatrix}0.7&0.1&0.1\\0.2&0.8&0.2\\0.1&0.1&0.7\end{bmatrix}$. Find the steady state probability vector for $P$.
$$P-I_3=\begin{bmatrix}-0.3&0.1&0.1\\0.2&-0.2&0.2\\0.1&0.1&-0.3\end{bmatrix}\xrightarrow[10R_3]{\begin{subarray}{r}10R_1 \\ 10R_2\end{subarray}}\begin{bmatrix}-3&1&1\\2&-2&2\\1&1&-3\end{bmatrix}\xrightarrow{Row\text{ }Reduce}\begin{bmatrix}1&0&-1\\0&1&-2\\0&0&0\end{bmatrix}$$
Solve System of Equations:
\begin{align*}
x_1-x_3&=0 & x_1 &=t \\
x_2-2x_3&=0 & x_2&=2t \\
0&=0 & x_3&=t
\end{align*}
Solve $x_1+x_2+x_3=1$:
\begin{align*}
x_1+x_2+x_3&=1 \\
t+2t+t&=1 \\
4t&=1 \\
t&=\frac{1}{4}
\end{align*}
$$\vv{x}=\begin{bmatrix}1/4\\1/2\\1/4\end{bmatrix}$$
\end{example}
\newpage
\section{Section 4.1 Eigenvalues and Eigenvectors}
\begin{example}{Example: What Do You Notice?}
Let $A=\begin{bmatrix}1&2\\3&2\end{bmatrix}$, $\vv{u}=\begin{bmatrix}2\\3\end{bmatrix}$, $\vv{v}=\begin{bmatrix}1\\-1\end{bmatrix}$. \\ \newline
Calculate $A\vv{u}$ and $A\vv{v}$. What do you notice?
\begin{align*}
A\vv{u}&=\begin{bmatrix}1&2\\3&2\end{bmatrix}\begin{bmatrix}2\\3\end{bmatrix}=\begin{bmatrix}8\\12\end{bmatrix}=4\begin{bmatrix}2\\3\end{bmatrix}=4\vv{u} \\
A\vv{v}&=\begin{bmatrix}1&2\\3&2\end{bmatrix}\begin{bmatrix}1\\-1\end{bmatrix}=\begin{bmatrix}-1\\1\end{bmatrix}=-1\begin{bmatrix}1\\-1\end{bmatrix}=-1\vv{v}
\end{align*}
$A\vv{u}=4\vv{u}$ and $A\vv{v}=-1\vv{v}$
\end{example}
\begin{definition}{Definition: Eigenvectors and Eigenvalues}
Let $A$ be a $n\times n$ matrix. A nonzero vector $\vv{x}$ is an \textbf{eigenvector} of $A$ with corresponding \textbf{eigenvalue} $\lambda$ (lambda) if \ul{$A\vv{x}=\lambda\vv{x}$}.
\begin{example}{Eigenvectors and Eigenvalues}
In previous example, $\vv{u}$ is an eigenvector of $A$ with corresponding eigenvalue $\lambda=4$.
\end{example}
\end{definition}
\begin{example}{Example: Steady State Vectors Are Eigenvectors}
If $P\vv{x}=\vv{x}$ and $\vv{x}\ne\vv{0}$ then $\vv{x}$ is an eigenvector of $P$ with corresponding eigenvalue $\lambda=1$.
\end{example}
\begin{example}{Example: Finding Eigenvalues Given Vectors}
$A=\begin{bmatrix}4&0&-2\\2&5&4\\0&0&5\end{bmatrix}$, $\vv{x}=\begin{bmatrix}-2\\0\\1\end{bmatrix}$, $\vv{y}=\begin{bmatrix}1\\1\\1\end{bmatrix}$. \\
Are $\vv{x}$ and $\vv{y}$ eigenvectors of $A$? If so, find their corresponding eigenvalues. \\ \newline
As $\vv{x}\ne\vv{0}$ and $\vv{y}\ne\vv{0}$ then we can check $A\vv{x}$ and $A\vv{y}$:
$$A\vv{x}=\begin{bmatrix}-10\\0\\5\end{bmatrix}=5\begin{bmatrix}-2\\0\\1\end{bmatrix}=5\vv{x}$$
$\vv{x}$ is an eigenvector of $A$ with $\lambda=5$.
$$A\vv{y}=\begin{bmatrix}2\\11\\5\end{bmatrix}\ne\lambda\begin{bmatrix}1\\1\\1\end{bmatrix}$$
As for all $\lambda$ $A\vv{y}\ne\lambda\vv{y}$ then $\vv{y}$ is \textbf{not} an eigenvector of $A$.
\end{example}
\begin{example}{Example: Is $0$ A Valid Eigenvalue}
$A=\begin{bmatrix}1&1\\1&1\end{bmatrix}\qquad\vv{u}=\begin{bmatrix}2\\-2\end{bmatrix}$\\ \newline
Is $\vv{u}$ an eigenvector of $A$?
$$A\vv{u}=\begin{bmatrix}1&1\\1&1\end{bmatrix}\begin{bmatrix}2\\-2\end{bmatrix}=\begin{bmatrix}0\\0\end{bmatrix}=0\begin{bmatrix}2\\-2\end{bmatrix}=0\vv{u}\Rightarrow\lambda=0$$
$\star$ Note: $\vv{0}$ is never an eigenvector but $\lambda=0$ can be an eigenvalue.
\end{example}
How can we find the eigenvalues of an $n\times n$ matrix? \\ \newline
Let $A$ be an $n\times n$ matrix. Then:
\begin{align*}
\text{$\lambda$ is an eigenvalue of $A$}\Leftrightarrow& A\vv{x}=\lambda\vv{x}\text{ for some }\vv{x}\ne\vv{0} \\
\Leftrightarrow&A\vv{x}-\lambda\vv{x}=\vv{0} \text{ for some }\vv{x}\ne\vv{0}\\
\Leftrightarrow&\underbrace{\(A-\lambda I_n\)}_{n\times n}\vv{x}=\vv{0}\text{ for some }\vv{x}\ne\vv{0} \\
\Leftrightarrow&A-\lambda I_n \text{ is noninvertible} \\
\Leftrightarrow&\text{det}\(A-\lambda I_n\)=0
\end{align*}
Since the determinant of a noninvertible matrix equals 0, we can conclude that $\lambda$ is an eigenvalue of $A$ if and only if det$\(A-\lambda I_n\)=0$.
\begin{definition}{Definition: Characteristic Equation and Characteristic Polynomial}
det$\(A-\lambda I_n\)=0$ is the \textbf{characteristic equation} of $A$. \\ \newline
det$\(A-\lambda I_n\)$ is the \textbf{characteristic polynomial} (with degree $n$) of $A$.
\end{definition}
\begin{example}{Example: Finding The Eigenvalues Of A Matrix}
Find the eigenvalues of $A=\begin{bmatrix}1&2\\3&2\end{bmatrix}$. \\ \newline
To find $\lambda$, solve det$\(A-\lambda I_n\)=0$.
$$A-\lambda I_2=\begin{bmatrix}1&2\\3&2\end{bmatrix}-\lambda\begin{bmatrix}1&0\\0&1\end{bmatrix}=\begin{bmatrix}1-\lambda&2\\3&2-\lambda\end{bmatrix}$$
Now solving for when the determinant is $0$
\begin{align*}
\abs{\begin{matrix}1-\lambda&2\\3&2-\lambda\end{matrix}}&= 0 \\
\(1-\lambda\)\(2-\lambda\)-2\(3\)&=0\\
2-\lambda-2\lambda+\lambda^2-6&=0\\
\lambda^2-3\lambda-4&=0\\
\(\lambda-4\)\(\lambda+1\)&=0
\end{align*}
\fbox{$\lambda_1=4$, $\lambda_2=-1$}
\end{example}
$\star$ Note: As a characteristic polynomial of a $2\times2$ matrix has a degree of 2 then a $2\times2$ matrix will have at most 2 distinct eigenvalues (but can have fewer). \\ \newline
Let $A$ be an $n\times n$ matrix.
\begin{itemize}
\item {To find eigenvalues of $A$, solve det$\(A-\lambda I_n\)=0$.}
\item {To find eigenvectors of $A$ corresponding to $\lambda$, we solve $\(A-\lambda I_n\)\vv{x}=\vv{0}$. Any nontrivial solution to this homogeneous linear system is an eigenvector of $A$ corresponding to $\lambda$.}
\end{itemize}
\begin{definition}{Definition: Eigenspace}
Let $A$ be an $n\times n$ matrix with eigenvalue $\lambda$. Then $E_\lambda=\text{null}\(A-\lambda I_n\)$ is the \textbf{eigenspace} of $A$ corresponding to $\lambda$. \\ \newline
$\star$ Note: $E_\lambda$ is a subspace of $\mathbb{R}^n$.
\end{definition}
\begin{example}{Example: Finding A Basis For An Eigenspace}
Let $A=\begin{bmatrix}2&1&1\\1&2&1\\1&1&2\end{bmatrix}$. One eigenvalue of $A$ is $\lambda=1$. Find a basis for $E_\lambda$.
$$A-1I_3=\begin{bmatrix}2-1&1&1\\1&2-1&1\\1&1&2-1\end{bmatrix}=\begin{bmatrix}1&1&1\\1&1&1\\1&1&1\end{bmatrix}\xrightarrow[R_3-R_1]{R_2-R_1}\begin{bNiceMatrix}
1&1&1\\
0&0&0\\
0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}$$
Solving system of equations:
\begin{align*}
x_1+x_2+x_3&=0 & x_1&=-s-t \\
&& x_2&= s\\
&& x_3&=t
\end{align*}
$$\vv{x}=\begin{bmatrix}-s-t\\s\\t\end{bmatrix}=s\begin{bmatrix}-1\\1\\0\end{bmatrix}+t\begin{bmatrix}-1\\0\\1\end{bmatrix}$$
\begin{align*}
E_\lambda&=\text{span}\(\begin{bmatrix}-1\\1\\0\end{bmatrix},\begin{bmatrix}-1\\0\\1\end{bmatrix}\) \leftarrow\text{eigenspace (contains infinitely many vectors)} \\
\Aboxed{\mathcal{B}_{E_\lambda}&=\left\{\begin{bmatrix}-1\\1\\0\end{bmatrix},\begin{bmatrix}-1\\0\\1\end{bmatrix}\right\}} \leftarrow\text{basis for eigenspace (contains 2 vectors)}
\end{align*}
\end{example}
\begin{example}{Example: Find Eigenvalues Then Find The Eigenspace For Each}
Let $A=\begin{bmatrix}1&1\\-1&3\end{bmatrix}$. Find all distinct eigenvalues of $A$. Then find a basis for each eigenspace. \\ \newline
To find $\lambda$, solve det$\(A-\lambda I_2\)=0$:
\begin{align*}
\text{det}\(\begin{bmatrix}1-\lambda&1\\-1&3-\lambda\end{bmatrix}\)&= 0\\
\(1-\lambda\)\(3-\lambda\)-1\(-1\)&=0 \\
3-4\lambda+\lambda^2+1&=0 \\
\(\lambda-2\)\(\lambda-2\)&=0 \\
\Aboxed{\lambda&=2}
\end{align*}
$A$ has one distinct eigenvalue. Now finding $E_\lambda$ by finding null$\(A-\lambda I\)$:
$$A-2I_2=\begin{bmatrix}1-2&1\\-1&3-2\end{bmatrix}=\begin{bmatrix}-1&1\\-1&1\end{bmatrix}\xrightarrow{Row\text{ }Reduce}\begin{bNiceMatrix}[first-row]
&\downarrow\\
1&-1\\
0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}$$
Solving system of equations:
\begin{align*}
x_1-x_2&=0 & x_1&=t \\
x_1&=x_2 & x_2&=t
\end{align*}
$$\vv{x}=t\begin{bmatrix}1\\1\end{bmatrix}$$
Finding the eigenspace:
\begin{align*}
E_\lambda&=\text{span}\(\begin{bmatrix}1\\1\end{bmatrix}\) \\
\Aboxed{\mathcal{B}_{E_\lambda}&=\left\{\begin{bmatrix}1\\1\end{bmatrix}\right\}}
\end{align*}
\end{example}
\subsection{Introduction to Complex Numbers}
\begin{definition}{Definition: Complex Numbers}
The number $z=a+bi$ where $i=\sqrt{-1}$ $\(i^2=-1\)$ and $a,b\in\mathbb{R}$ is called a \textbf{complex number} with $\underbrace{\text{\textbf{real part}}}_{Re\(z\)=a}$ $a$ and an $\underbrace{\text{\textbf{imaginary part}}}_{Im\(z\)=b}$ $b$.
\end{definition}
\begin{tblr}{width=\linewidth,colspec={XX},cells={halign=l,valign=m}}
{The \textbf{complex conjugate} of $z=a+bi$ is $\bar{z}=a-bi$. \\ \newline
The \textbf{magnitude} of $z=a+bi$ is $\abs{z}=\sqrt{a^2+b^2}$.} & {\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -2.5, ymax = 2.5,
xmin = -2.5, xmax = 2.5,
axis line style = {Stealth-Stealth, ultra thick},
xlabel = {$Re$},
ylabel = {$Im$},
xtick = {1.25},
xticklabels = {a},
ytick = {-2, 2},
yticklabels = {-b,b},
clip = false,
]
\draw[ultra thick] (0,0) -- (1.25,2) node[pos = 0.5, rotate = 57.9946167919, above] {$\abs{z}$};
\addplot[mark=*, ultra thick] coordinates {(1.25,2)} node[right] {$z=a+bi$};
\draw[ultra thick] (0,0) -- (1.25,-2) node[pos = 0.5, rotate = -57.9946167919, below] {$\abs{\bar{z}}$};
\addplot[mark=*, ultra thick] coordinates {(1.25,-2)} node[right] {$\bar{z}=a-bi$};
\end{axis}
\end{tikzpicture}}}
\end{tblr}
\subsection{Complex Number Operations}
\begin{example}{Example: Complex Number Operations}
Let $z=2+2i$ and $w=1-i$. Calculate $\abs{z}$, $\bar{w}$, $z+w$, $z-w$, $zw$, $\displaystyle{\frac{z}{w}}$.
\begin{align*}
\abs{z}&=\sqrt{2^2+2^2}=\sqrt{8}=2\sqrt{2} \\
\bar{w}&=1+i \\
z+w&=2+2i+1-i=3+i \\
z-w&=2+2i-\(1-i\)=2+2i-1+i=1+3i \\
zw&=\(2+2i\)\(1-i\)=2-2i+2i-2i^2=2-2\(-1\)=2+2=4 \\
\frac{z}{w}&=\frac{z}{w}\(\frac{\bar{w}}{\bar{w}}\)=\(\frac{2+2i}{1-i}\)\(\frac{1+i}{1+i}\)=\frac{2+2i+2i+2i^2}{1+i-i-i^2}=\frac{2+4i+2\(-1\)}{1-\(-1\)}=\frac{4i}{2}=2i
\end{align*}
\end{example}
\begin{example}{Example: Finding Complex Eigenvalues}
Find the eigenvalues of $A=\begin{bmatrix}1&2\\-2&1\end{bmatrix}$.
\begin{align*}
\text{det}\(A-\lambda I_2\)&= 0 \\
\abs{\begin{matrix}1-\lambda&2\\-2&1-\lambda\end{matrix}}&= 0 \\
\(1-\lambda\)\(1-\lambda\)-2\(-2\)&=0 \\
1-\lambda-\lambda+\lambda^2+4&=0 \\
\lambda^2-2\lambda+5&=0 \\
\lambda&=\frac{-b\pm\sqrt{b^2-4ac}}{2a} \\
\lambda&=\frac{-\(-2\)\pm\sqrt{\(-2\)^2-4\(1\)\(5\)}}{2\(1\)} \\
&=\frac{2\pm\sqrt{4-20}}{2} \\
&=\frac{2\pm\sqrt{-16}}{2} \\
&=\frac{2\pm4i}{2} \\
\lambda&=1\pm2i
\end{align*}
\fbox{$\begin{aligned}\lambda_1&=1+2i\\\lambda_2&=1-2i\end{aligned}$}
\end{example}
$\star$ Note: Complex eigenvalues occur in complex conjugate pairs. \\ \newline
If $\lambda$ is an eigenvalue of $A$, then $\bar{\lambda}$ is also an eigenvalue of $A$.
\begin{example}{Example: Finding The Eigenspace Of A Complex Eigenvalue}
Find a basis for each eigenspace in the previous example:
\begin{itemize}
\item {$\lambda_1=1+2i$ \\ \newline
Calculate $A-\(1+2i\)I_2$:
$$A-\(1+2i\)I_2=\begin{bmatrix}1-\(1+2i\)&2\\-2&1-\(1+2i\)\end{bmatrix}=\begin{bmatrix}-2i&2\\-2&-2i\end{bmatrix}$$
Row Reduce:
$$\begin{bmatrix}-2i&2\\-2&-2i\end{bmatrix}\xrightarrow{R_1\leftrightarrow R_2}\begin{bmatrix}-2&-2i\\-2i&2\end{bmatrix}\xrightarrow{-\frac{1}{2}R_1}\begin{bmatrix}1&i\\-2i&2\end{bmatrix}\xrightarrow{R_2+2iR_1}\begin{bmatrix}1&i\\0&0\end{bmatrix}$$
Solving System of Equations:
\begin{align*}
x_1+ix_2&=0 & x_1 &= -it \\
x_1&=-ix_2 & x_2&=t
\end{align*}
$$\vv{x}=\begin{bmatrix}-it\\t\end{bmatrix}=\begin{bmatrix}-i\\1\end{bmatrix}$$
Get the eigenspace:
\begin{align*}
E_{\lambda_1}&=\text{span}\(\begin{bmatrix}-i\\1\end{bmatrix}\) \\
\Aboxed{\mathcal{B}_{E_{\lambda_1}}&=\left\{\begin{bmatrix}-i\\1\end{bmatrix}\right\}}
\end{align*}}
\item {$\lambda_2=1-2i$
\begin{align*}
E_{\lambda_2}&=\text{span}\(\begin{bmatrix}i\\1\end{bmatrix}\) \\
\mathcal{B}_{E_{\lambda_2}}&=\left\{\begin{bmatrix}i\\1\end{bmatrix}\right\}
\end{align*}
If $\begin{bmatrix}a+bi\\c+di\end{bmatrix}$ is an eigenvector of $A$ corresponding to the eigenvalue $\lambda$, then $\begin{bmatrix}a-bi\\c-di\end{bmatrix}$ is an eigenvector of $A$ corresponding to $\bar{\lambda}$.}
\end{itemize}
\end{example}
\newpage
\section{Section 4.2 Determinants}
\begin{definition}{Definition: Determinant Of \text{$2\times2$}}
If $A=\begin{bmatrix}a&b\\c&d\end{bmatrix}$, then the \textbf{determinant} of $A$ is the scalar:
$$\text{det}\(A\)=\abs{A}=\abs{\begin{matrix}a&b\\c&d\end{matrix}}=ad-bc$$
\end{definition}
\begin{definition}{Definition: General Determinant}
If $A$ is an $n\times n$ matrix where $n\ge2$, then the \textbf{determinant} of $A$ is the scalar obtained through a \textbf{cofactor expansion}. The cofactor expansion along the $1^{\text{st}}$ row of $A$ is below.
$$\text{det}\(A\)=\(-1\)^{1+1}a_{11}\text{det}\(A_{11}\)+\(-1\)^{1+2}a_{12}\text{det}\(A_{12}\)+\hdots+\(-1\)^{1+n}a_{1n}\text{det}\(A_{1n}\)$$
where $A_{ij}$ is the $\(n-1\)\times\(n-1\)$ matrix obtained by removing the $i^{\text{th}}$ row and the $j^{\text{th}}$ column of $A$.
\end{definition}
\begin{example}{Example: Calculating The Determinant Of A $3\times3$ Matrix}
Calculate det$A$ where $A=\text{\begin{blockarray}{cccc}
\begin{block}{ [ cc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{A11}{1} & \tikzmarknode[inner sep=3pt]{A12}{-4} & \tikzmarknode[inner sep=3pt]{A13}{2}\\
2 & 1 & -1 \\
1 & 1 & 1 \\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(A11.north) (A12.north) (A13.north)(A13.east) (A13.south)(A12.south) (A11.south) (A11.west)};
\end{tikzpicture}$
\begin{align*}
\text{det}A&=\(-1\)^{1+1}\(1\)\abs{\begin{matrix}1&-1\\1&1\end{matrix}}+\(-1\)^{1+2}\(-4\)\abs{\begin{matrix}2&-1\\1&1\end{matrix}}+\(-1\)^{1+3}\(2\)\abs{\begin{matrix}2&1\\1&1\end{matrix}} \\
&=1\(1+1\)+4\(2+1\)+2\(2-1\) \\
&=2+12+2 \\
\Aboxed{\text{det}A&=16}
\end{align*}
\end{example}
\textbf{Recall:} For an $n\times n$ matrix $A$,
\begin{itemize}
\item If det$A=0\Longleftrightarrow A$ is not invertible
\item If det$A\ne0\Longleftrightarrow A$ is invertible
\end{itemize}
So the matrix in the last example is invertible since det$A\ne0$.
\begin{theorem}{Theorem: Don't Need To Start A Cofactor Expansion On The First Entry}
Let $A$ be an $n\times n$ matrix. Then we can use a cofactor expansion along any row or down any column of $A$ to calculate det$A$.
\begin{itemize}
\item Choose a row or column of $A$
\item We get the sum of $n$ terms: $\(-1\)^{i+j}a_{ij}\text{det}\(A_{ij}\)$
\end{itemize}
(If we do an expansion along a row, $i$ is fixed. If we do an expansion along a column, $j$ is fixed.) \\ \newline
\textbf{Cofactor expansion along the $i^{\text{th}}$ row:}
$$\text{det}A=\(-1\)^{i+1}a_{i1}\text{det}\(A_{i1}\)+\hdots+\(-1\)^{i+n}a_{in}\text{det}\(A_{in}\)$$
\textbf{Cofactor expansion along the $j^{\text{th}}$ column:}
$$\text{det}A=\(-1\)^{1+j}a_{1j}\text{det}\(A_{1j}\)+\hdots+\(-1\)^{n+j}a_{nj}\text{det}\(A_{nj}\)$$
\end{theorem}
\begin{example}{Example: Cofactor Expansion Down $3^{\text{rd}}$ Column}
$A= \text{
\begin{blockarray}{cccc}
\begin{block}{ [ cc c ]}
\bigstrut[t]
1 & -4 &\tikzmarknode[inner sep=3pt]{B11}{2}\\
2 & 1& \tikzmarknode[inner sep=3pt]{B21}{-1}\\
1&1& \tikzmarknode[inner sep=3pt]{B31}{1} \\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B11.east) (B21.east) (B31.east) (B31.south) (B31.west) (B21.west) (B11.west)};
\end{tikzpicture}$ \\ \newline
Cofactor Expansion Down $3^{\text{rd}}$ Column:
\begin{align*}
\text{det}A&=\(-1\)^{1+3}\(2\)\abs{\begin{matrix}2&1\\1&1\end{matrix}}+\(-1\)^{2+3}\(-1\)\abs{\begin{matrix}1&-4\\1&1\end{matrix}}+\(-1\)^{3+3}\(1\)\abs{\begin{matrix}1&-4\\2&1\end{matrix}} \\
&=2\(2-1\)+\(1+4\)+\(1+8\) \\
&=2+5+9 \\
&=16
\end{align*}
\end{example}
\begin{example}{Example: Cofactor Expansion Along $4^{\text{th}}$ Column}
Calculate det$\(B\)$ where $B= \text{
\begin{blockarray}{cccc}
\begin{block}{ [ ccc c ]}
\bigstrut[t]
1 & 0 & 3 &\tikzmarknode[inner sep=3pt]{B11}{0}\\
0 & 1& 1&\tikzmarknode[inner sep=3pt]{B21}{0}\\
1&0& 2&\tikzmarknode[inner sep=3pt]{B31}{0} \\
0&1& 0&\tikzmarknode[inner sep=3pt]{B41}{1} \\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B11.east) (B21.east) (B31.east) (B41.east) (B41.south) (B41.west) (B31.west) (B21.west) (B11.west)};
\end{tikzpicture}$ \\ \newline
Cofactor Expansion Along $4^{\text{th}}$ Column:
\begin{align*}
\text{det}B&=0+0+0+\(-1\)^{4+4}\(1\)\text{
\begin{blockarray}{|ccc|}
\begin{block}{|ccc|}
1 & \tikzmarknode[inner sep=3pt]{B11}{0}&3\\
0 & \tikzmarknode[inner sep=3pt]{B21}{1}&1\\
1& \tikzmarknode[inner sep=3pt]{B31}{0}&2 \\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B11.east) (B21.east) (B31.east) (B31.south) (B31.west) (B21.west) (B11.west)};
\end{tikzpicture} \\
&=0+\(-1\)^{2+2}\(1\)\abs{\begin{matrix}1&3\\1&2\end{matrix}}+0 \\
&=2-3 \\
\Aboxed{\text{det}B&=-1}
\end{align*}
det$B=-1\ne0$ so $B$ is invertible.
\end{example}
\begin{definition}{Definition: Upper Triangular Form}
An $n\times n$ matrix is called \textbf{upper triangular} if all of the entries lying below the main diagonal are zero.
\begin{example}{Example: Upper Triangular Form}
$\text{
\begin{blockarray}{ccc}
\begin{block}{ [ cc c ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{B11}{2} & 3 & -1\\
0& \tikzmarknode[inner sep=3pt]{B21}{8} & 0\\
0&0&\tikzmarknode[inner sep=3pt]{B31}{3}\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, ultra thick, dotted] plot[] coordinates {(B11.south west) (B11.south) (B21.south) (B31.south) (B31.south east)};
\end{tikzpicture}$
\end{example}
\end{definition}
\begin{definition}{Definition: Lower Triangular Form}
An $n\times n$ matrix is called \textbf{lower triangular} if all of the entries lying above the main diagonal are zero.
\begin{example}{Example: Lower Triangular Form}
$\text{
\begin{blockarray}{cc}
\begin{block}{ [ cc ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{B11}{3} & 0\\
2& \tikzmarknode[inner sep=3pt]{B21}{4}\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, ultra thick, dotted] plot[] coordinates {(B11.north west) (B11.north) (B21.north) (B21.north east)};
\end{tikzpicture}$
\end{example}
\end{definition}
\begin{definition}{Definition: Diagonal Matrix}
An $n\times n$ matrix that is both upper and lower triangular is called a \textbf{diagonal matrix}.
\begin{example}{Example: Diagonal Matrix}
$\text{
\begin{blockarray}{cccc}
\begin{block}{ [ cccc ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{B11}{2} & 0 & 0 & 0\\
0 & 1 & 0 & 0 \\
0 & 0 & -8 & 0 \\
0 & 0 & 0 & \tikzmarknode[inner sep=3pt]{B21}{0}\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, ultra thick, dotted] plot[] coordinates {(B11.south west) (B11.south) (B21.south) (B21.south east)};
\draw[red, ultra thick, dotted] plot[] coordinates {(B11.north west) (B11.north) (B21.north) (B21.north east)};
\end{tikzpicture}$
\end{example}
\end{definition}
\begin{example}{Example: Calculating Determinant Of An Upper Triangular Matrix}
Calculate $\abs{\begin{matrix}a&b&c\\0&d&e\\0&0&f\end{matrix}}$
\begin{align*}
\text{
\begin{blockarray}{ccc}
\begin{block}{ | cc c |}
\tikzmarknode[inner sep=3pt]{B11}{a} & b & c\\
\tikzmarknode[inner sep=3pt]{B21}{0} & d & e\\
\tikzmarknode[inner sep=3pt]{B31}{0} &0 & f\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B11.east) (B21.east) (B31.east) (B31.south) (B31.west) (B21.west) (B11.west)};
\end{tikzpicture}&=\(-1\)^{1+1}a\abs{\begin{matrix}d&e\\0&f\end{matrix}} \\
&=a\(df-0\) \\
&=adf
\end{align*}
\end{example}
\begin{example}{Example: Determinant Of A Lower Triangular Matrix}
Calculate $\text{
\begin{blockarray}{cccc}
\begin{block}{ | cccc |}
\tikzmarknode[inner sep=3pt]{B11}{2} & 0 & 0 & 0\\
3 & 1 & 0 & 0 \\
-1 & 0 & 4 & 0 \\
0 & 0 & 0 & \tikzmarknode[inner sep=3pt]{B21}{5}\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, ultra thick, dotted] plot[] coordinates {(B11.north west) (B11.north) (B21.north) (B21.north east)};
\end{tikzpicture}$
\begin{align*}
\text{
\begin{blockarray}{cccc}
\begin{block}{ | cccc |}
\tikzmarknode[inner sep=3pt]{B11}{2} & 0 & 0 & 0\\
3 & 1 & 0 & 0 \\
-1 & 0 & 4 & 0 \\
0 & 0 & 0 & \tikzmarknode[inner sep=3pt]{B21}{5}\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, ultra thick, dotted] plot[] coordinates {(B11.north west) (B11.north) (B21.north) (B21.north east)};
\end{tikzpicture}&= 2\text{
\begin{blockarray}{ccc}
\begin{block}{ | cc c |}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{B11}{1} & 0 & 0\\
0& \tikzmarknode[inner sep=3pt]{B21}{4} & 0\\
0&0&\tikzmarknode[inner sep=3pt]{B31}{5}\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, ultra thick, dotted] plot[] coordinates {(B11.north west) (B11.north) (B21.north) (B31.north) (B31.north east)};
\end{tikzpicture}\\
&=2\(1\)\abs{\begin{matrix}4&0\\0&5\end{matrix}} \\
&=2\(1\)\(4\)\(5\) \\
&=40
\end{align*}
\end{example}
\begin{theorem}{Theorem: Getting The Determinant From A Triangular Matrix}
If $A$ is an $n\times n$ triangular matrix (upper/lower/diagonal) then
$$\text{det}A=\underbrace{a_{11}a_{22}a_{33}\hdots a_{nn}}_{\text{Product of Main Diagonal}}$$
\end{theorem}
Any REF of an $n\times n$ matrix is an upper triangular matrix. We can use row reduction to help us find det$A$ but each row operation affects the determinant differently:
\subsection{How the 3 Elementary Row Operations Affect Determinant}
\begin{enumerate}
\item {\textbf{Row Replacement:} does not change the determinant: replace $R_i$ with $R_i+kR_j$ $\(i\ne j\)$
\begin{example}{Example: Row Replacement Not Affecting Determinant}
$$A=\begin{bmatrix}1&2\\3&4\end{bmatrix}\xrightarrow{R_2-3R_1}\begin{bmatrix}1&2\\0&-2\end{bmatrix}=B$$
$$\text{det}A=4-6=-2\qquad\text{det}B=-2$$
$\text{det}A=\text{det}B$ \\ \newline
$\star$ Note:
$$A=\begin{bmatrix}1&2\\3&4\end{bmatrix}\xrightarrow{3R_1-R_2}\begin{bmatrix}1&2\\0&2\end{bmatrix}=C$$
$$\text{det}A=-2\qquad\text{det}C=2$$
$\text{det}A\ne\text{det}C$ because $3R_1-R_2$ is \textbf{not} a row replacement.
\end{example}}
\item {\textbf{Row Interchange:} negates (multiply by $-1$) the determinant: $R_i\leftrightarrow R_j$
\begin{example}{Example: Row Interchange Negating Determinant}
$$A=\begin{bmatrix}0&3\\2&1\end{bmatrix}\xrightarrow{R_1\leftrightarrow R_2}\begin{bmatrix}2&1\\0&3\end{bmatrix}=B$$
$$\text{det}A=-6\qquad\text{det}B=6$$
\begin{align*}
\text{det}B&=-\text{det}A \\
\text{det}A&=-\text{det}B
\end{align*}
\end{example}}
\item {\textbf{Scaling A Row:} by $k$ scales the determinant by $k$: $kR_i$
\begin{example}{Example: Scaling A Row Scales The Determinant}
$$A=\begin{bmatrix}2&4\\0&3\end{bmatrix}\xrightarrow{\frac{1}{2}R_1}\begin{bmatrix}1&2\\0&3\end{bmatrix}=B$$
$$\text{det}A=6\qquad\text{det}B=3$$
\begin{align*}
\text{det}B&=\frac{1}{2}\text{det}A & \text{det}B&=k\text{det}A \\
\text{det}A&=2\text{det}B & \text{det}A&=\frac{1}{k}\text{det}B \\
&& k&\ne0
\end{align*}
\end{example}}
\end{enumerate}
\begin{example}{Example: Calculating The Determinant Through Row Reduction}
Calculate det$A$ where $A=\begin{bmatrix}1&5&-6\\-1&-4&4\\-2&-7&9\end{bmatrix}$. \\ \newline
Row Reduce:
$$\begin{bNiceMatrix}
1&5&-6\\
-1&-4&4\\
-2&-7&9
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}\xrightarrow[\begin{subarray}{l}\text{det}\\ \text{Unchanged}\end{subarray}]{\begin{subarray}{c}R_2+R_1 \\ R_3+2R_1\end{subarray}}\begin{bNiceMatrix}
1&5&-6\\
0&1&-2\\
0&3&-3
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix}\xrightarrow[\begin{subarray}{l}\text{det}\\ \text{Unchanged}\end{subarray}]{R_3-3R_2}\begin{bmatrix}1&5&-6\\0&1&-2\\0&0&3\end{bmatrix}=B$$
Find det$B$:
\begin{align*}
\abs{\begin{matrix}1&5&-6\\0&1&-2\\0&3&-3\end{matrix}}&=\(-1\)^{1+1}\(1\)\abs{\begin{matrix}1&-2\\3&-3\end{matrix}} \\
&=-3+6 \\
&=3
\end{align*}
Find det$A$:
\begin{align*}
\text{det}A&=\text{det}B \\
&=1\(1\)\(3\) \\
\Aboxed{\text{det}A&=3}
\end{align*}
\end{example}
\begin{example}{Example: Calculating Determinant Of A $5\times 5$ Matrix}
Calculate the determinant of $A=\begin{bmatrix}2&1&-1&3&5\\1&1&2&1&1\\-2&-1&1&3&1\\1&2&2&1&1\\4&2&-2&2&2\end{bmatrix}$. \\ \newline
\textbf{Way 1:}\\
Row Reduce:
\begin{align*}
A\xrightarrow[\(\begin{subarray}{r}\text{Negate}\\\text{det}\end{subarray}\)]{R_1\leftrightarrow R_2}&\begin{bmatrix}1&1&2&1&1\\2&1&-1&3&5\\-2&-1&1&3&1\\1&2&2&1&1\\4&2&-2&2&2\end{bmatrix} \\
\xrightarrow[\(\begin{subarray}{r}\text{det}\\\text{Unchanged}\end{subarray}\)]{\begin{subarray}{c}R_2-2R_1\\R_3+2R_1\\R_4-R_1\\R_5-4R_1\end{subarray}}&\underbrace{\begin{bmatrix}1&1&2&1&1\\0&-1&-5&1&3\\0&1&5&5&3\\0&1&0&0&0\\0&-2&-10&-2&-2\end{bmatrix}}_{C} \\
\xrightarrow[\(\begin{subarray}{r}\text{Negate}\\\text{det}\end{subarray}\)]{R_2\leftrightarrow R_4}&\begin{bmatrix}1&1&2&1&1\\0&1&0&0&0\\0&1&5&5&3\\0&-1&-5&1&3\\0&-2&-10&-2&-2\end{bmatrix} \\
\xrightarrow[\(\begin{subarray}{r}\text{det}\\\text{Unchanged}\end{subarray}\)]{\begin{subarray}{c}R_3-R_2\\R_4+R_2\\R_5+2R_2\end{subarray}}&\begin{bmatrix}1&1&2&1&1\\0&1&0&0&0\\0&0&5&5&3\\0&0&-5&1&3\\0&0&-10&-2&-2\end{bmatrix} \\
\xrightarrow[\(\begin{subarray}{r}\text{det}\\\text{Unchanged}\end{subarray}\)]{\begin{subarray}{c}R_4+R_3\\R_5+2R_3\end{subarray}}&\begin{bmatrix}1&1&2&1&1\\0&1&0&0&0\\0&0&5&5&3\\0&0&0&6&6\\0&0&0&8&4\end{bmatrix} \\
\xrightarrow[\(\begin{subarray}{r}\text{det}\\\text{Unchanged}\end{subarray}\)]{R_5-\frac{8}{6}R_4}&\underbrace{\begin{bmatrix}1&1&2&1&1\\0&1&0&0&0\\0&0&5&5&3\\0&0&0&6&6\\0&0&0&0&-4\end{bmatrix}}_{B} \\
\end{align*}
Calculate det$A$:
\begin{align*}
\text{det}B&=1\(1\)\(5\)\(6\)\(-4\)=-120 \\
\text{det}A&=\(-1\)\(-1\)\text{det}B=\fbox{$-120$}
\end{align*}
\textbf{Way 2:}\\
$C=\text{
\begin{blockarray}{ccccc}
\begin{block}{ [ ccccc ]}
\bigstrut[t]
\tikzmarknode[inner sep=3pt]{B11}{1} & 1 & 2 & 1 & 1\\
\tikzmarknode[inner sep=3pt]{B21}{0} & -1 & -5 & 1 & 3\\
\tikzmarknode[inner sep=3pt]{B31}{0}&1&5&5&3\\
\tikzmarknode[inner sep=3pt]{B41}{0}&1&0&0&0\\
\tikzmarknode[inner sep=3pt]{B51}{0}&-2&-10&-2&-2\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B11.east) (B21.east) (B31.east) (B41.east) (B51.east) (B51.south) (B51.west) (B41.west) (B31.west) (B21.west) (B11.west)};
\end{tikzpicture}\qquad\text{det}A=-\text{det}C$ \\ \newline
Finding det$C$:
$$\text{det}C=\(-1\)^{1+1}\(1\)\text{
\begin{blockarray}{cccc}
\begin{block}{ | cccc |}
-1 & -5 & 1 & 3\\
1 & 5 & 5 & 3\\
\tikzmarknode[inner sep=3pt]{B11}{1}&\tikzmarknode[inner sep=3pt]{B21}{0}& \tikzmarknode[inner sep=3pt]{B31}{0}&\tikzmarknode[inner sep=3pt]{B41}{0}\\
-2&-10&-2&-2\\
\end{block}
\end{blockarray}}
\begin{tikzpicture}[remember picture,overlay]
\draw[red, thick] plot[smooth cycle] coordinates {(B11.north) (B21.north) (B31.north) (B41.north) (B41.east) (B41.south) (B31.south) (B21.south) (B11.south) (B11.west)};
\end{tikzpicture}=\(-1\)^{3+1}\(1\)\abs{\begin{matrix}-5&1&3\\5&5&3\\-10&-2&-2\end{matrix}}$$
Row Reduce:
\pgfset
{
nicematrix/cell-node/.append style =
{inner sep = 3pt}
}
\begin{align*}
\begin{bmatrix}-5&1&3\\5&5&3\\-10&-2&-2\end{bmatrix}\xrightarrow[\begin{subarray}{l}\text{det}\\\text{Unchanged}\end{subarray}]{\begin{subarray}{c}R_2+R_1\\R_3-2R_1\end{subarray}}\begin{bNiceMatrix}
-5 & 1 & 3 \\
0 & 6 & 6 \\
0 & -4 & -8
\CodeAfter
\tikz \draw[red, thick] plot[smooth cycle] coordinates {(1-1.north) (1-1.east) (2-1.east) (3-1.east) (3-1.south) (3-1.west) (2-1.west) (1-1.west)};
\tikz \node[rectangle, draw, fit=(2-2)(2-3)(3-2)(3-3), inner sep = -2, red, thick] {};
\end{bNiceMatrix}&=\(-1\)^{1+1}\(-5\)\abs{\begin{matrix}6&6\\-4&-8\end{matrix}} \\
&=-5\(-48+24\) \\
&=-5\(-24\) \\
&=120
\end{align*}
\pgfset
{
nicematrix/cell-node/.append style =
{inner sep = 0pt}
}
Finding det$A$:
\begin{align*}
\text{det}C&=120 \\
\text{det}A&=-\text{det}C=-120
\end{align*}
\end{example}
\subsection{Properties of Determinants}
Let $A$ and $B$ be $n\times n$ matrices
\begin{enumerate}
\item {If $A$ has a zero row or a zero column, det$A=0$.}
\item {If $A$ has 2 identical rows or 2 identical columns, det$A=0$.}
\item {det$\(I_n\)=1$}
\item {det$\(AB\)=\(\text{det}A\)\(\text{det}B\)$}
\item {det$\(A^T\)=\text{det}\(A\)$}
\item {If $A$ is invertible, det$\displaystyle{\(A^{-1}\)=\frac{1}{\text{det}A}}$.}
\item {det$\(kA\)=k^n\text{det}A$ for any $k\in\mathbb{R}$.}
\end{enumerate}
$\star$ Note: In general, det$\(A+B\)\ne\text{det}A+\text{det}B$
\begin{example}{Example: \text{det$\(A+B\)\ne\text{det}A+\text{det}B$}}
$$A=\underbrace{\begin{bmatrix}1&0\\0&1\end{bmatrix}}_{\text{det}A=1}\qquad B=\underbrace{\begin{bmatrix}-1&0\\0&-1\end{bmatrix}}_{\text{det}B=1}\qquad A+B=\underbrace{\begin{bmatrix}0&0\\0&0\end{bmatrix}}_{\text{det}\(A+B\)=0}$$
\end{example}
\subsection{One More Way to Calculate $3\times3$ Determinants}
\begin{example}{Example: One More Way to Calculate $3\times3$ Determinants}
\pgfset
{
nicematrix/cell-node/.append style =
{inner sep = 3pt}
}
$A=\begin{NiceMatrix}
1&2&-1&1&2\\
3&4&-2&3&4\\
-1&1&1&-1&1
\CodeAfter
\SubMatrix[{1-1}{3-3}]
\tikz \draw[red, thick, name = a] plot[smooth cycle] coordinates {(1-1.north)(2-2.north)(3-3.north)(3-3.east)(3-3.south)(2-2.south)(1-1.south)(1-1.west)};
\tikz \draw[red, thick] plot[smooth cycle] coordinates {(1-2.north)(2-3.north)(3-4.north)(3-4.east)(3-4.south)(2-3.south)(1-2.south)(1-2.west)};
\tikz \draw[red, thick] plot[smooth cycle] coordinates {(1-3.north)(2-4.north)(3-5.north)(3-5.east)(3-5.south)(2-4.south)(1-3.south)(1-3.west)};
\tikz \draw[blue, thick] plot[smooth cycle] coordinates {(1-3.north)(2-2.north)(3-1.north)(3-1.west)(3-1.south)(2-2.south)(1-3.south)(1-3.east)};
\tikz \draw[blue, thick] plot[smooth cycle] coordinates {(1-4.north)(2-3.north)(3-2.north)(3-2.west)(3-2.south)(2-3.south)(1-4.south)(1-4.east)};
\tikz \draw[blue, thick] plot[smooth cycle] coordinates {(1-5.north)(2-4.north)(3-3.north)(3-3.west)(3-3.south)(2-4.south)(1-5.south)(1-5.east)};
\tikz \draw[-latex, thick] ($ (1-1.north west) + (-0.3,0.2) $) -- (1-1.north west);
\tikz \draw[-latex, thick] ($ (1-2.north west) + (-0.3,0.2) $) -- (1-2.north west);
\tikz \draw[-latex, thick] ($ (1-3.north west) + (-0.3,0.2) $) -- (1-3.north west);
\tikz \draw[-latex, thick] ($ (3-1.south west) - (0.3,0.2) $) -- (3-1.south west);
\tikz \draw[-latex, thick] ($ (3-2.south west) - (0.3,0.2) $) -- (3-2.south west);
\tikz \draw[-latex, thick] ($ (3-3.south west) - (0.3,0.2) $) -- (3-3.south west);
\end{NiceMatrix}$\hspace{2.5cm}$\star$ Note: This only works for $3\times3$ matrices.
\pgfset
{
nicematrix/cell-node/.append style =
{inner sep = 0pt}
}
\begin{align*}
1\(4\)\(1\)&=4 & \(-1\)\(4\)\(-1\)&=4 \\
2\(-2\)\(-1\)&=4 & \(1\)\(-2\)\(1\)&=-2 \\
-1\(3\)\(1\)&=-3 & \(1\)\(3\)\(2\)&=6
\end{align*}
$$\(4+4-3\)-\(4-2+6\)=5-8=\fbox{$-3$}$$
\end{example}
\newpage
\section{Section 4.3: Eigenvalues, Eigenvectors and Eigenspaces for $n\times n$ Matrices}
\textbf{Recall:} For an $n\times n$ matrix $A$, if $A\vv{x}=\lambda\vv{x}$ for some $\vv{x}\ne\vv{0}$, then $\vv{x}$ is an \textbf{eigenvector} of $A$ corresponding to the \textbf{eigenvalue} $\lambda$. \\ \newline
\textbf{To find the eigenvalues of $A$:} Solve
$$\overbrace{\underbrace{\text{det}\(A-\lambda I_n\)}_{\text{Characteristic Polynomial}}=0}^{\text{Characteristic Equation}}$$
for $\lambda$. \\ \newline
\textbf{To find eigenvectors/eigenspaces of $\lambda$:} Solve
$$\(A-\lambda I_n\)\vv{x}=0$$
for $\vv{x}$. Any nonzero solution is an eigenvector of $A$ corresponding to $\lambda$. The \textbf{eigenspace} of $A$ corresponding to $\lambda$ is
$$E_\lambda=\text{null}\(A-\lambda I_n\)$$
\begin{example}{Example: Finding Eigenvalues Of A $3\times3$ Matrix}
Find the eigenvalues of $A=\begin{bmatrix}1&1&0\\1&1&0\\0&1&1\end{bmatrix}$.
\pgfset
{
nicematrix/cell-node/.append style =
{inner sep = 4pt}
}
\begin{align*}
0=\abs{\begin{NiceMatrix}
1-\lambda&1&0\\
1&1-\lambda&0\\
0&1&1-\lambda
\CodeAfter
\tikz \draw[red, thick] plot[smooth cycle] coordinates {(1-3.north)(1-3.east)(2-3.east)(3-3.east)(3-3.south)(3-3.west)(2-3.west)(1-3.west)};
\end{NiceMatrix}}&=\(-1\)^{3+3}\(1-\lambda\)\abs{\begin{matrix}1-\lambda&1\\1&1-\lambda\end{matrix}} \\
&=\(1-\lambda\)\[\(1-\lambda\)^2-1\] \\
&=\(1-\lambda\)\(1-2\lambda+\lambda^2-1\) \\
&=\(1-\lambda\)\(\lambda^2-2\lambda\) \\
0&=\(1-\lambda\)\(\lambda\)\(\lambda-2\)
\end{align*}
\fbox{$\lambda_1=1,\text{ }\lambda_2=0,\text{ }\lambda_3=2$}
\pgfset
{
nicematrix/cell-node/.append style =
{inner sep = 0pt}
}
\end{example}
\begin{example}{Example: Finding The Eigenvalues Of Another $3\times3$ Matrix}
Find the eigenvalues of $B=\begin{bmatrix}4&0&-2\\2&5&4\\0&0&5\end{bmatrix}$
\begin{align*}
0=\abs{\begin{matrix}4-\lambda&0&-2\\2&5-\lambda&4\\0&0&5-\lambda\end{matrix}}&=\(-1\)^{3+3}\(5-\lambda\)\abs{\begin{matrix}4-\lambda&0\\2&5-\lambda\end{matrix}} \\
&=\(5-\lambda\)\(4-\lambda\)\(5-\lambda\) \\
&=\(5-\lambda\)^2\(4-\lambda\)
\end{align*}
\fbox{$\lambda_1=5,\text{ }\lambda_2=4$}
\end{example}
\begin{example}{Example: Finding The Eigenvalues Of A Triangular $5\times5$ Matrix}
Find the eigenvalues of $C=\begin{bmatrix}4&0&1&2&1\\0&9&1&-1&2\\0&0&9&0&1\\0&0&0&2&0\\0&0&0&0&-1\end{bmatrix}$ \\
\begin{align*}
\text{det}\(C-\lambda I\)&=\(4-\lambda\)\(9-\lambda\)^2\(2-\lambda\)\(-1-\lambda\) \\
\Aboxed{\lambda_1&=4,\text{ }\lambda_2=9,\text{ }\lambda_3=2,\text{ }\lambda_4=-1}
\end{align*}
\end{example}
$\star$ The eigenvalues of a triangular matrix are its main diagonal entries.
\subsection{Algebraic and Geometric Multiplicities of $\lambda$}
\begin{theorem}{Theorem: There Are $n$ Many Eigenvalues}
Let $A$ be an $n\times n$ matrix. Then det$\(A-\lambda I\)$ has $n$ roots $\lambda_1,\lambda_2,\hdots,\lambda_n$ (possibly complex and possibly repeated) and
$$\text{det}\(A-\lambda I\)=\(-1\)^n\(\lambda-\lambda_1\)\(\lambda-\lambda_2\)\hdots\(\lambda-\lambda_n\)$$
\end{theorem}
\begin{definition}{Definition: Algebraic Multiplicity}
Let $A$ be an $n\times n$ matrix and suppose det$\(A-\lambda I\)$ has $k$ \textbf{distinct} roots $\lambda_1,\lambda_2,\hdots,\lambda_k$. Then
$$\text{det}\(A-\lambda I\)=\(-1\)^n\(\lambda-\lambda_1\)^{n_1}\(\lambda-\lambda_2\)^{n_2}\hdots\(\lambda-\lambda_k\)^{n_k}$$
where $n_1+n_2+\hdots+n_k=n$. We call $n_i$ the \textbf{algebraic multiplicity} of the eigenvalue $\lambda_i$.
\end{definition}
\begin{example}{Example: Finding The Algebraic Multiplicities Of A Matrix}
$A=\begin{bmatrix}1&1&0\\1&1&0\\0&1&1\end{bmatrix}\qquad\text{det}\(A-\lambda I\)=\(1-\lambda\)\(\lambda\)\(\lambda-2\)$
\begin{center}
\begin{tblr}{colspec = {QQ}, hlines, vlines, cells = {halign = c, valign = m}}
Eigenvalues & Algebraic Multiplicity \\
1&1 \\
0&1 \\
2&1
\end{tblr}
\end{center}
\end{example}
\begin{example}{Example: Finding The Algebraic Multiplicities Of Another Matrix}
$B=\begin{bmatrix}4&0&-2\\2&5&4\\0&0&5\end{bmatrix}\qquad\text{det}\(B-\lambda I\)=\(5-\lambda\)^2\(4-\lambda\)$
\begin{center}
\begin{tblr}{colspec = {QQ}, hlines, vlines, cells = {halign = c, valign = m}}
Eigenvalues & Algebraic Multiplicity \\
5&2 \\
4&1
\end{tblr}
\end{center}
\end{example}
If $\lambda$ is an eigenvalue of $n\times n$ matrix $A$, then $1\le\text{Algebraic Multiplicity of }\lambda\le n$.
\begin{example}{Example: Algebraic Multiplicities For A $4\times4$ Matrix}
Find the eigenvalues and their algebraic multiplicities for $A=\begin{bmatrix}2&0&0&0\\0&1&0&1\\0&0&2&0\\0&1&0&1\end{bmatrix}$.
\pgfset
{
nicematrix/cell-node/.append style =
{inner sep = 4pt}
}
\begin{align*}
0=\abs{\begin{NiceMatrix}
2-\lambda&0&0&0 \\
0&1-\lambda&0&1\\
0&0&2-\lambda&0\\
0&1&0&1-\lambda
\CodeAfter
\tikz \draw[red, thick] plot[smooth cycle] coordinates {(1-1.north)(1-2.north)(1-3.north)(1-4.north)(1-4.east)(1-4.south)(1-3.south)(1-2.south)(1-1.south)(1-1.west)};
\end{NiceMatrix}}&=\(-1\)^{1+1}\(2-\lambda\)\abs{\begin{NiceMatrix}
1-\lambda&0&1\\
0&2-\lambda&0 \\
1&0&1-\lambda
\CodeAfter
\tikz \draw[red, thick] plot[smooth cycle] coordinates {(2-1.north)(2-2.north)(2-3.north)(2-3.east)(2-3.south)(2-2.south)(2-1.south)(2-1.west)};
\end{NiceMatrix}} \\
&=\(2-\lambda\)\[\(-1\)^{2+2}\(2-\lambda\)\abs{\begin{matrix}1-\lambda&1\\1&1-\lambda\end{matrix}}\] \\
&=\(2-\lambda\)^2\(\(1-\lambda\)^2-1\) \\
&=\(2-\lambda\)^2\(1-2\lambda+\lambda^2-1\) \\
&=\(2-\lambda\)^2\(\lambda^2-2\lambda\) \\
&=-\lambda\(2-\lambda\)^3
\end{align*}
\pgfset
{
nicematrix/cell-node/.append style =
{inner sep = 0pt}
}
\begin{center}
\begin{tblr}{colspec = {QQ}, hlines, vlines, cells = {halign = c, valign = m}}
Eigenvalues & Algebraic Multiplicity \\
{$\lambda_1=0$} & 1 \\
{$\lambda_2=2$} &3
\end{tblr}
\end{center}
\end{example}
\textbf{Recall:} $E_\lambda=\text{null}\(A-\lambda I_n\)$ is the eigenspace of $A$ corresponding to the eigenvalue $\lambda$.
\begin{definition}{Definition: Geometric Multiplicity}
Let $A$ be an $n\times n$ matrix and let $\lambda$ be an eigenvalue of $A$. The \textbf{geometric multiplicity} of $\lambda$ is dim$\(E_\lambda\)=\text{nullity}\(A-\lambda I_n\)$.
\end{definition}
\begin{example}{Example: Finding The Geometric Multiplicity Of A $3\times3$ Matrix}
$A=\begin{bmatrix}4&0&-2\\2&5&4\\0&0&5\end{bmatrix}$ has the characteristic polynomial $\(5-\lambda\)^2\(4-\lambda\)$. Find the geometric multiplicity of each eigenvalue.
\begin{itemize}
\item {$\lambda_1=5$: Row Reduce
$$A-5I=\begin{bmatrix}-1&0&-2\\2&0&4\\0&0&0\end{bmatrix}\xrightarrow{-R_1}\begin{bmatrix}1&0&2\\2&0&4\\0&0&0\end{bmatrix}\xrightarrow{R_2-2R_1}\begin{bNiceMatrix}
1&0&2\\
0&0&0\\
0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\end{bNiceMatrix}$$
Solve System of Equations:
\begin{align*}
x_1+2x_3&=0 & x_1&=-2t \\
&& x_2&=s \\
&& x_3&=t
\end{align*}
$$\vv{x}=\begin{bmatrix}-2t\\s\\t\end{bmatrix}=\begin{bmatrix}0\\s\\0\end{bmatrix}+\begin{bmatrix}-2t\\0\\t\end{bmatrix}=s\begin{bmatrix}0\\1\\0\end{bmatrix}+t\begin{bmatrix}-2\\0\\1\end{bmatrix}$$
$\mathcal{B}_{E_{\lambda_1}}=\left\{\begin{bmatrix}0\\1\\0\end{bmatrix},\begin{bmatrix}-2\\0\\1\end{bmatrix}\right\}$ thus dim$\(E_{\lambda_1}\)=2$ so $\lambda_1$ has a geometric multiplicity of 2.}
\item{$\lambda_2=4:\qquad A-4I\rightarrow\hdots$ \\ \newline
$\mathcal{B}_{E_{\lambda_2}}=\left\{\begin{bmatrix}-1\\2\\0\end{bmatrix}\right\}$ thus dim$\(E_{\lambda_2}\)=1$ so $\lambda_2$ has geometric multiplicity of 1.}
\end{itemize}
\begin{center}
\begin{tblr}{colspec = {QQQ}, hlines, vlines, cells = {halign = c, valign = m}}
Eigenvalues & Algebraic Multiplicity & Geometric Multiplicity\\
{$\lambda_1=5$} & 2 & 2\\
{$\lambda_2=4$} &1 & 1
\end{tblr}
\end{center}
\end{example}
\begin{example}{Example: Finding The Algebraic Multiplicity And Geometric Multiplicity}
$A=\begin{bmatrix}1&0&0\\0&1&1\\1&0&0\end{bmatrix}$ has characteristic polynomial $-\lambda\(1-\lambda\)^2$. Find all distinct eigenvalues of $A$. Then find the algebraic and geometric multiplicity of each eigenvalue.
\begin{align*}
\lambda_1&=0:\text{ }A-0I\rightarrow\begin{bNiceMatrix}[first-row]
&&\downarrow\\
1&0&0\\
0&1&1\\
0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-2) circle (2mm) ;
\end{bNiceMatrix}\qquad\text{nullity}\(A-0I\)=1 \\
\lambda_2&=1:\text{ }A-1I\rightarrow\begin{bNiceMatrix}[first-row]
&\downarrow&\\
1&0&0\\
0&0&1\\
0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-3) circle (2mm) ;
\end{bNiceMatrix}\qquad\text{nullity}\(A-1I\)=1
\end{align*}
\begin{center}
\begin{tblr}{colspec = {QQQ}, hlines, vlines, cells = {halign = c, valign = m}}
Eigenvalues & Algebraic Multiplicity & Geometric Multiplicity\\
{$\lambda_1=0$} & 1 & 1\\
{$\lambda_2=1$} &2 & 1
\end{tblr}
\end{center}
\end{example}
Let $A$ be an $n\times n$ matrix with eigenvalue $\lambda$. Then
$$1\le\text{Geometric Multiplicity of }\lambda\le\text{Algebraic Multiplicity of }\lambda\le n$$
If algebraic multiplicity of $\lambda$ is 1, then the geometric multiplicity of $\lambda$ is also 1.
\begin{example}{Example: Finding $A^3\vv{v}$}
Let $A$ be a $2\times2$ matrix where $\vv{v_1}=\begin{bmatrix}1\\-1\end{bmatrix}$ and $\vv{v_2}=\begin{bmatrix}1\\2\end{bmatrix}$ are eigenvectors of $A$ corresponding to the eigenvalues of $\lambda_1=-1$ and $\lambda_2=2$ respectively. $\(A\vv{v_i}=\lambda_i\vv{v_i}\)$ \\
Calculate $A^3\begin{bmatrix}5\\1\end{bmatrix}$. $\(\text{Hint: }\begin{bmatrix}5\\1\end{bmatrix}=3\begin{bmatrix}1\\-1\end{bmatrix}+2\begin{bmatrix}1\\2\end{bmatrix}\)$
$$A\vv{v_1}=\lambda_1\vv{v_1}\qquad A\vv{v_2}=\lambda_2\vv{v_2}\qquad\begin{bmatrix}5\\1\end{bmatrix}=3\vv{v_1}+2\vv{v_2}$$
\begin{align*}
A^3\begin{bmatrix}5\\1\end{bmatrix}&=A^3\(3\vv{v_1}+2\vv{v_2}\) & A\vv{v_1}&=\lambda_1\vv{v_1} \\
&=3A^3\vv{v_1}+2A^3\vv{v_2} & A^2\vv{v_1}&=A\(A\vv{v_1}\) \\
&=3\lambda_1^3\vv{v_1}+2\lambda_2^3\vv{v_2}& &=A\(\lambda_1\vv{v_1}\) \\
&=3\(-1\)^3\begin{bmatrix}1\\-1\end{bmatrix}+2\(2\)^3\begin{bmatrix}1\\2\end{bmatrix} & &=\lambda_1\(A\vv{v_1}\) \\
&=-3\begin{bmatrix}1\\-1\end{bmatrix}+16\begin{bmatrix}1\\2\end{bmatrix} & &=\lambda_1\(\lambda_1\vv{v_1}\)\\
\Aboxed{A^3\begin{bmatrix}5\\1\end{bmatrix}&=\begin{bmatrix}13\\35\end{bmatrix}} & &=\lambda_1^2\vv{v_1}
\end{align*}
\end{example}
Suppose $A$ is an $n\times n$ matrix with eigenvectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ with corresponding eigenvalues $\lambda_1,\lambda_2,\hdots,\lambda_k$. \textbf{If $\vv{x}\in\mathbb{R}^n$ can be written as $\vv{x}=c_1\vv{v_1}+c_2\vv{v_2}+\hdots+c_k\vv{v_k}$} then
$$A^m\vv{x}=c_1\lambda_1^m\vv{v_1}+c_2\lambda_2^m\vv{v_2}+\hdots+c_k\lambda_k^m\vv{v_k}$$
for any positive integer $m$. If we can make a basis for $\mathbb{R}^n$ using eigenvectors of $A$ (called an \textbf{eigenvector basis}) then we can do this process for any $\vv{x}\in\mathbb{R}^n$.
\begin{theorem}{Theorem: Eigenvectors And Linear Independence}
Let $A$ be an $n\times n$ matrix, and let $\lambda_1,\lambda_2,\hdots,\lambda_k$ be distinct eigenvalues of $A$ with corresponding eigenvectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ (so $A\vv{v_i}=\lambda_i\vv{v_i}$) then $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ are linearly independent.
\end{theorem}
\begin{example}{Example: Eigenvector Basis}
$A=\begin{bmatrix}4&0&-2\\2&5&4\\0&0&5\end{bmatrix}\qquad\begin{aligned}\lambda_1&=5&\mathcal{B}_{E_{\lambda_1}}&=\left\{\begin{bmatrix}0\\1\\0\end{bmatrix},\begin{bmatrix}-2\\0\\1\end{bmatrix}\right\} \\
\lambda_2&=4&\mathcal{B}_{E_{\lambda_2}}&=\left\{\begin{bmatrix}-1\\2\\0\end{bmatrix}\right\}\end{aligned}$ \\
$\left\{\begin{bmatrix}0\\1\\0\end{bmatrix},\begin{bmatrix}-2\\0\\1\end{bmatrix},\begin{bmatrix}-1\\2\\0\end{bmatrix}\right\}$ is a basis for $\mathbb{R}^3$ consisting of eigenvectors of $A$.
\end{example}
\textbf{Recall:} That for the last example \begin{tabular}{l}
{geometric multiplicity of $\lambda_1$ is 2} \\
{geometric multiplicity of $\lambda_2$ is 1}
\end{tabular}
$2+1=3$ for $\mathbb{R}^3$ \\ \newline
If geometric multiplicities of eigenvalues of $A$ sum to $n$ (when $A$ is $n\times n$), we can create a basis for $\mathbb{R}^n$ using eigenvectors of $A$.
\newpage
\section{Section 4.4 Diagonalization}
\textbf{Recall:} A \textbf{diagonal matrix} $D$ is an $n\times n$ matrix whose only nonzero entries are on the main diagonal:
$$D=\begin{bNiceMatrix}
d_1&0&\Cdots&0\\
0&d_2&&\Vdots\\
\Vdots&&\Ddots&0\\
0&\Cdots&0&d_n
\end{bNiceMatrix}$$
Diagonal Matrices allow for simplified calculations of determinant, eigenvalues, and powers of the matrix.
\begin{example}{Example: General $2\times2$ Diagonal Matrix To A Power}
$D=\begin{bmatrix}a&0\\0&b\end{bmatrix}$
\begin{align*}
D^2&=\begin{bmatrix}a&0\\0&b\end{bmatrix}\begin{bmatrix}a&0\\0&b\end{bmatrix}=\begin{bmatrix}a^2&0\\0&b^2\end{bmatrix} \\
D^3&=\begin{bmatrix}a&0\\0&b\end{bmatrix}\begin{bmatrix}a^2&0\\0&b^2\end{bmatrix} =\begin{bmatrix}a^3&0\\0&b^3\end{bmatrix} \\
D^k&=\begin{bmatrix}a^k&0\\0&b^k\end{bmatrix}
\end{align*}
For any positive integer $k$. \\ \newline
$\star$ This only works for diagonal matrices.
\end{example}
\begin{example}{Example: Powers Not Being Easy On Non Diagonal Matrices}
$A=\begin{bmatrix}1&2\\3&4\end{bmatrix}$
$$A^2=\begin{bmatrix}1&2\\3&4\end{bmatrix}\begin{bmatrix}1&2\\3&4\end{bmatrix}=\begin{bmatrix}7&10\\15&22\end{bmatrix}\ne\begin{bmatrix}1^2&2^2\\3^2&4^2\end{bmatrix}$$
\end{example}
In general, if $D=\begin{bNiceMatrix}
d_1&0&\Cdots&0\\
0&d_2&&\Vdots\\
\Vdots&&\Ddots&0\\
0&\Cdots&0&d_n
\end{bNiceMatrix}$ then $D^k=\begin{bNiceMatrix}
d_1^k&0&\Cdots&0\\
0&d_2^k&&\Vdots\\
\Vdots&&\Ddots&0\\
0&\Cdots&0&d_n^k
\end{bNiceMatrix}$ for any positive integer $k$.
\subsection{Similarity}
\begin{definition}{Definition: Similar}
Let $A$ and $B$ be $n\times n$ matrices. We say $A$ is \textbf{similar} to $B$, denoted $A\sim B$, if there exists an invertible matrix $P$ such that $B=P^{-1}AP$.
\end{definition}
\textbf{Properties of Similarity:}
\begin{itemize}
\item $A\sim A$
\item {If $A\sim B$ then $B\sim A\text{. }\(A=PBP^{-1}\)$}
\item {If $\underbrace{A\sim B}_{B=P^{-1}AP}$ and $\underbrace{B\sim C}_{C=Q^{-1}BQ}$ then $A\sim C$ more specifically:
$$C=Q^{-1}BQ=Q^{-1}P^{-1}APQ=\(PQ\)^{-1}A\(PQ\)$$}
\end{itemize}
Given an $n\times n$ matrix $A$, can we find an $n\times n$ diagonal matrix $D$ such that $A\sim D$? \\ \newline
The answer is \textbf{sometimes}.
\begin{theorem}{Theorem: What It Means To Be Similar}
Let $A$ and $B$ be similar matrices. ($A\sim B$ which means $A=PBP^{-1}$ for some invertible $P$) Then:
\begin{enumerate}
\item {det$A=\text{det}B$}
\item {rank$A=\text{rank}B$}
\item {$\star$ $A$ and $B$ have the same characteristic polynomial: det$\(A-\lambda I\)=\text{det}\(B-\lambda I\)$}
\item {$\star$ $A$ and $B$ have the same eigenvalues.}
\item {$A^m\sim B^m$ for all positive integers $m$.}
\item {If $A$ is invertible then $A^m\sim B^m$ for all integers $m$.}
\end{enumerate}
\end{theorem}
\begin{definition}{Definition: Diagonalizable And Diagonalization}
Let $A$ be an $n\times n$ matrix. If there exists an invertible matrix $P$ and diagonal matrix $D$ such that $A=PDP^{-1}$, then we say $A$ is \textbf{diagonalizable} and we call $A=PDP^{-1}$ a \textbf{diagonalization} of $A$.
\end{definition}
\textbf{Notes About Diagonalization:}
\begin{itemize}
\item If $A$ is diagonalizable then $A\sim D$.
\item Not all square matrices are diagonalizable.
\item In general if $A$ is diagonalizable, its diagonalization is not unique.
\end{itemize}
\begin{theorem}{Theorem: What Does $P$ And $D$ Look Like?}
Suppose $A$ is an $n\times n$ diagonalizable matrix and $A=PDP^{-1}$ for some invertible matrix $P$ and some diagonal matrix $D$. Then the columns of $P$ form a basis for $\mathbb{R}^n$ consisting of eigenvectors of $A$ (eigenvector basis) and all the main diagonal entries of $D$ are the corresponding eigenvalues.
\end{theorem}
\subsection{Test for Diagonalizability}
Suppose $A$ is an $n\times n$ matrix with $k$ distinct eigenvalues $\lambda_1,\lambda_2,\hdots,\lambda_k$. Then $A$ is diagonalizable if and only if:
$$\text{dim}\(E_{\lambda_1}\)+\text{dim}\(E_{\lambda_2}\)+\hdots+\text{dim}\(E_{\lambda_k}\)=n$$
In other words the sum of the geometric multiplicities must equal $n$ for $A$ to be diagonalizable.
\begin{itemize}
\item {This occurs when the algebraic multiplicity of $\lambda$ equals the geometric multiplicity of $\lambda$ for each eigenvalue of $A$.}
\end{itemize}
\begin{example}{Example: Determining Diagonalizability And Diagonalization}
$A=\begin{bmatrix}-1&3\\0&2\end{bmatrix}$ Determine if $A$ is diagonalizable. If so, find a diagonalization of $A$.
\begin{center}
\begin{tblr}{colspec = {QQQ},hlines,vlines,cells={halign=c,valign=m}}
Eigenvalues & Algebraic Multiplicities & Geometric Multiplicities \\
$\lambda_1=-1$ & 1 & 1 \\
$\lambda_2=2$ & 1 & 1
\end{tblr}
\end{center}
As $1+1=2$ then $A$ is diagonalizable.
\begin{itemize}
\item {\textbf{$\lambda_1=-1$:} Row Reduce
$$A-\(-1\)I=\begin{bmatrix}-1+1&3\\0&2+1\end{bmatrix}=\begin{bmatrix}0&3\\0&3\end{bmatrix}\rightarrow\begin{bmatrix}0&1\\0&0\end{bmatrix}$$
Solve System of Equations: \\ \newline
As $x_1$ is free and $x_2$ is zero then
$$\vv{x}=\begin{bmatrix}t\\0\end{bmatrix}=t\begin{bmatrix}1\\0\end{bmatrix}$$
Making:
\begin{align*}
E_{\lambda_1}&=\text{span}\(\begin{bmatrix}1\\0\end{bmatrix}\) \\
\mathcal{B}_{E_{\lambda_1}}&=\left\{\begin{bmatrix}1\\0\end{bmatrix}\right\}
\end{align*}}
\item {$\lambda_2=2:$ $A-2I\rightarrow\hdots$
\begin{align*}
E_{\lambda_2}&=\text{span}\(\begin{bmatrix}1\\1\end{bmatrix}\) \\
\mathcal{B}_{E_{\lambda_2}}&=\left\{\begin{bmatrix}1\\1\end{bmatrix}\right\}
\end{align*}}
\end{itemize}
$A$ is diagonalizable
\begin{itemize}
\item {One diagonalization is $P_1D_1P_1^{-1}$ where $P_1=\begin{bmatrix}1&1\\0&1\end{bmatrix}$ and $D_1=\begin{bmatrix}-1&0\\0&2\end{bmatrix}$.}
\item{Another diagonalization is $P_2D_2P_2^{-1}$ where $P_2=\begin{bmatrix}1&1\\1&0\end{bmatrix}$ and $D_2=\begin{bmatrix}2&0\\0&-1\end{bmatrix}$.}
\end{itemize}
\end{example}
How to check your answer:
\begin{align*}
A&=PDP^{-1} \\
\Aboxed{AP&=PD}
\end{align*}
\begin{example}{Example: Calculating A Power Of A Diagonalizable Matrix}
$A=\begin{bmatrix}-1&3\\0&2\end{bmatrix}$, $A=PDP^{-1}$ where $P=\begin{bmatrix}1&1\\1&0\end{bmatrix}$ and $D=\begin{bmatrix}2&0\\0&-1\end{bmatrix}$. Calculate $A^8$. \\ \newline
Are there any useful trends?
\begin{align*}
A&=PDP^{-1} \\
A^2&=PDP^{-1}PDP^{-1}=PDDP^{-1}=PD^2P^{-1} \\
A^3&=\underbrace{PDP^{-1}}_{A}\underbrace{PD^2P^{-1}}_{A^2}=PDD^2P^{-1}=PD^3P^{-1} \\
A^k&=PD^kP^{-1} \text{ for any positive integer $k$}
\end{align*}
Calculating $A^8$:
\begin{align*}
A^8&=PD^8P^{-1} \\
&=\begin{bmatrix}1&1\\1&0\end{bmatrix}\begin{bmatrix}2&0\\0&-1\end{bmatrix}^8\begin{bmatrix}1&1\\1&0\end{bmatrix}^{-1} \\
&=\begin{bmatrix}1&1\\1&0\end{bmatrix}\begin{bmatrix}2^8&0\\0&\(-1\)^8\end{bmatrix}\(\frac{1}{0-1}\begin{bmatrix}0&-1\\-1&1\end{bmatrix}\) \\
&=\begin{bmatrix}1&1\\1&0\end{bmatrix}\begin{bmatrix}256&0\\0&1\end{bmatrix}\begin{bmatrix}0&1\\1&-1\end{bmatrix} \\
&=\begin{bmatrix}256&1\\256&0\end{bmatrix}\begin{bmatrix}0&1\\1&-1\end{bmatrix} \\
A^8&=\begin{bmatrix}1&255\\0&256\end{bmatrix}
\end{align*}
\end{example}
\begin{example}{Example: Finding Diagonalization Of A $3\times3$ Matrix}
Is $A=\begin{bmatrix}2&0&-2\\1&3&2\\0&0&3\end{bmatrix}$ diagonalizable? If so, find a diagonalization of $A$. \\ \newline
Get Eigenvalues:
$$\begin{bmatrix}2&0&-2\\1&3&2\\0&0&3\end{bmatrix}\xrightarrow{R_2-\frac{1}{2}R_1}\begin{bmatrix}2&0&-2\\0&3&3\\0&0&3\end{bmatrix}\Longrightarrow\begin{aligned}\lambda_1&=3\\\lambda_2&=2\end{aligned}$$
\begin{itemize}
\item{\textbf{$\lambda_1=3$:} Row Reduce
$$A-3I=\begin{bmatrix}-1&0&-2\\1&0&2\\0&0&0\end{bmatrix}\xrightarrow{R_2+R_1}\begin{bNiceMatrix}
-1&0&-2\\
0&0&0\\
0&0&0
\CodeAfter
\tikz \draw (1-1) circle (3mm) ;
\end{bNiceMatrix}$$
Solve System of Equations:
\begin{align*}
x_1+2x_3&=0
\end{align*}
$x_2$ and $x_3$ are free so:
$$\vv{x}=\begin{bmatrix}-2t\\s\\t\end{bmatrix}=t\begin{bmatrix}-2\\0\\1\end{bmatrix}+s\begin{bmatrix}0\\1\\0\end{bmatrix}$$
Get A Basis:
\begin{center}
\fbox{$\mathcal{B}_{E_{\lambda_1}}=\left\{\begin{bmatrix}0\\1\\0\end{bmatrix},\begin{bmatrix}-2\\0\\1\end{bmatrix}\right\}$}
\end{center}}
\item{\textbf{$\lambda_2=2$:} $A-2I\rightarrow\hdots\qquad\mathcal{B}_{E_{\lambda_2}}=\left\{\begin{bmatrix}-1\\1\\0\end{bmatrix}\right\}$}
\end{itemize}
Get the Geometric Multiplicities:
\begin{center}
\begin{tblr}{colspec = {QQQ},hlines,vlines,cells={halign=c,valign=m}}
Eigenvalues & Algebraic Multiplicities & Geometric Multiplicities \\
$\lambda_1=3$ & 2 & 2 \\
$\lambda_2=2$ & 1 & 1
\end{tblr}
\end{center}
As the sum of the geometric multiplicities is 3 for a $3\times3$ matrix then $A$ is diagonalizable and $A=PDP^{-1}$ where:
$$P=\begin{bmatrix}0&-2&-1\\1&0&1\\0&1&0\end{bmatrix}\qquad \text{and}\qquad D=\begin{bmatrix}3&0&0\\0&3&0\\0&0&2\end{bmatrix}$$
\end{example}
\begin{example}{Example: Not A Diagonalizable Matrix}
Is $A=\begin{bmatrix}1&0&0&0\\0&-1&1&0\\0&0&-1&0\\0&0&0&1\end{bmatrix}$ diagonalizable? \\ \newline
$\lambda_1=-1$: Row Reduce
$$A-\(-1\)I=\begin{bmatrix}2&0&0&0\\0&0&1&0\\0&0&0&0\\0&0&0&2\end{bmatrix}\xrightarrow{R_3\leftrightarrow R_4}\begin{bNiceMatrix}[first-row]
&\downarrow&& \\
2&0&0&0 \\
0&0&1&0 \\
0&0&0&2 \\
0&0&0&0
\CodeAfter
\tikz \draw (1-1) circle (2mm) ;
\tikz \draw (2-3) circle (2mm) ;
\tikz \draw (3-4) circle (2mm) ;
\end{bNiceMatrix}$$
Get Geometric Multiplicity:
$$\text{dim}\(E_{\lambda_1}\)=\text{nullity}\(A-\lambda_1I\)=1$$
\begin{center}
\begin{tblr}{colspec = {QQQ},hlines,vlines,cells={halign=c,valign=m}}
$\lambda$ & Algebraic Multiplicities & Geometric Multiplicities \\
$\lambda_1=-1$ & 2 & 1 \\
$\lambda_2=1$ & 2 &
\end{tblr}
\end{center}
$A$ is not diagonalizable (the sum of the geometric multiplicities is at most $3\ne4$)
\end{example}
\newpage
\section{Section 5.1 Orthogonality in $\mathbb{R}^n$}
\textbf{Recall:} Let $\vv{u},\vv{v}\in\mathbb{R}^n$. Then $\vv{u}$ and $\vv{v}$ are \textbf{orthogonal} if $\vv{u}\cdot\vv{v}=0$.
\begin{definition}{Definition: Projection}
Let $\vv{u},\vv{v}\in\mathbb{R}^n$ where $\vv{u}\ne\vv{0}$. Then the \textbf{projection} of $\vv{v}$ onto $\vv{u}$ is defined as:
$$\text{proj}_{\vv{u}}\vv{v}=\(\frac{\vv{u}\cdot\vv{v}}{\vv{u}\cdot\vv{u}}\)\vv{u}$$
\end{definition}
\textbf{Notes on Projection:}
\begin{itemize}
\item {$\text{proj}_{\vv{u}}\vv{v}$ is a scalar multiple of $\vv{u}$ so $\text{proj}_{\vv{u}}\vv{v}$ is in span$\(\vv{u}\)$.}
\item {Calculate $\(\vv{v}-\text{proj}_{\vv{u}}\vv{v}\)\cdot\vv{u}$:
\begin{align*}
\(\vv{v}-\text{proj}_{\vv{u}}\vv{v}\)\cdot\vv{u}&=\vv{v}\cdot\vv{u}-\text{proj}_{\vv{u}}\vv{v}\cdot\vv{u}\\
&=\vv{v}\cdot\vv{u}-\(\frac{\vv{u}\cdot\vv{v}}{\cancel{\vv{u}\cdot\vv{u}}}\)\cancel{\vv{u}\cdot\vv{u}} \\
&=\vv{v}\cdot\vv{u}-\vv{u}\cdot\vv{v} \\
&=\vv{u}\cdot\vv{v}-\vv{u}\cdot\vv{v} \\
&=0
\end{align*}
Thus $\vv{v}-\text{proj}_{\vv{u}}\vv{v}$ and $\vv{u}$ are orthogonal.
$$\vv{v}=\underbrace{\(\text{proj}_{\vv{u}}\vv{v}\)}_{\text{In span$\(\vv{u}\)$}}+\underbrace{\(\vv{v}-\text{proj}_{\vv{u}}\vv{v}\)}_{\text{Orthogonal to span$\(\vv{u}\)$}}$$}
\end{itemize}
\begin{center}
\resizebox{0.5\linewidth}{!}{\begin{tikzpicture}
\draw[ultra thick, dotted, latex-latex] (-4,-4) -- (4,4) node[right] {span$\(\vv{u}\)$};
\draw[ultra thick, black, -latex] (0,0) -- (2,2) node[right] {$\vv{u}$};
\draw[ultra thick, black, -latex] (0,0) -- (-3,1) node[left] {$\vv{v}$};
\draw[ultra thick, black, -latex] (0,0) -- (-1,-1) node[below, pos = 0.5, rotate=45] {$\text{proj}_{\vv{u}}\vv{v}$};
\draw[ultra thick, black, dotted] (-3,1) -- (-1,-1);
\draw[ultra thick, black, -latex] (0,0) -- (-2,2) node[above, pos = 0.5, rotate=-45] {$\vv{v}-\text{proj}_{\vv{u}}\vv{v}$};
\draw[ultra thick, black, dotted] (-3,1) -- (-2,2);
\draw[red, thick] (-0.25,0.25) -- (0,0.5) -- (0.25,0.25);
\fill (0,0) circle[radius=3pt];
\fill (-3,1) circle[radius=3pt];
\fill (-1,-1) circle[radius=3pt];
\end{tikzpicture}}
\end{center}
\begin{example}{Example: Calculating proj}
Let $\vv{u}=\begin{bmatrix}-1\\0\end{bmatrix}$ and $\vv{v}=\begin{bmatrix}1\\1\end{bmatrix}$. Calculate $\text{proj}_{\vv{u}}\vv{v}$ and $\text{proj}_{\vv{v}}\vv{u}$.
\begin{align*}
\text{proj}_{\vv{u}}\vv{v}&=\(\frac{\vv{u}\cdot\vv{v}}{\vv{u}\cdot\vv{u}}\)\vv{u} & \text{proj}_{\vv{v}}\vv{u}&=\(\frac{\vv{v}\cdot\vv{u}}{\vv{v}\cdot\vv{v}}\)\vv{v} \\
&=\frac{-1\(1\)+0\(1\)}{\(-1\)\(-1\)+0\(0\)}\vv{u} & &=\frac{-1\(1\)+1\(0\)}{1\(1\)+1\(1\)}\vv{v} \\
&=-1\vv{u} & &=-\frac{1}{2}\vv{v} \\
&=\begin{bmatrix}1\\0\end{bmatrix}&&=\begin{bmatrix}-1/2\\-1/2\end{bmatrix}
\end{align*}
\begin{tblr}{width=\textwidth,colspec={XX}, cells = {halign = c, valign = m}}
{\begin{tikzpicture}
\draw[thin, latex-latex] (0,-2.5) -- (0,2.5);
\draw[thin, latex-latex] (-2.5,0) -- (2.5,0);
\draw[ultra thick,latex-latex, dotted] (-2,0) -- (2,0) node[above] {$\text{span}\(\vv{u}\)$};
\draw[ultra thick,-latex] (0,0) -- (-1,0) node[above] {$\vv{u}$};
\draw[ultra thick, -latex,red] (0,0) -- (1,0) node[pos = 0.5, below] {$\text{proj}_{\vv{u}}\vv{v}$};
\draw[ultra thick,-latex] (0,0) -- (1,1) node[right] {$\vv{v}$};
\draw[ultra thick, -latex,red] (0,0) -- (0,1) node[above] {$\vv{v}-\text{proj}_{\vv{u}}\vv{v}$};
\fill (0,0) circle[radius=3pt];
\end{tikzpicture}} &
{\begin{tikzpicture}
\draw[thin, latex-latex] (0,-2.5) -- (0,2.5);
\draw[thin, latex-latex] (-2.5,0) -- (2.5,0);
\draw[ultra thick,latex-latex, dotted] (-2,-2) -- (2,2) node[right] {$\text{span}\(\vv{v}\)$};
\draw[ultra thick,-latex] (0,0) -- (-1,0) node[left] {$\vv{u}$};
\draw[ultra thick, -latex,red] (0,0) -- (-0.5,-0.5) node[below, rotate=45,pos=0.5] {$\text{proj}_{\vv{v}}\vv{u}$};
\draw[ultra thick,-latex] (0,0) -- (1,1) node[right] {$\vv{v}$};
\draw[ultra thick, -latex,red] (0,0) -- (-0.5,0.5) node[above] {$\vv{u}-\text{proj}_{\vv{v}}\vv{u}$};
\draw[ultra thick, dotted] (-0.5,0.5) -- (-1,0);
\draw[ultra thick, dotted] (-0.5,-0.5) -- (-1,0);
\fill (0,0) circle[radius=3pt];
\end{tikzpicture}}
\end{tblr}
\end{example}
\textbf{Recall:} $\vv{u}$ and $\vv{v}$ are \textbf{orthogonal} if $\vv{u}\cdot\vv{v}=0$
\begin{definition}{Definition: Orthogonal and Orthonormal Set}
Consider the set of vectors $S=\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\right\}$.
\begin{enumerate}
\item {The set $S$ is \textbf{orthogonal} if all pairs of distinct vectors in the set $S$ are orthogonal.
$$\(\vv{v_i}\cdot\vv{v_j}=0\text{ whenever }i\ne j\)$$}
\item {The set $S$ is \textbf{orthonormal} if $S$ is an orthogonal set containing only unit vectors
$$\(\norm{\vv{v_i}}=1\text{ for all }i\)$$}
\end{enumerate}
\end{definition}
\begin{example}{Example: Is a Set Orthogonal?}
$S_1=\left\{\underbrace{\begin{bmatrix}1\\1\\1\\1\end{bmatrix}}_{\vv{v_1}},\underbrace{\begin{bmatrix}1\\1\\-1\\-1\end{bmatrix}}_{\vv{v_2}},\underbrace{\begin{bmatrix}1\\-1\\-1\\1\end{bmatrix}}_{\vv{v_3}},\underbrace{\begin{bmatrix}-1\\1\\-1\\1\end{bmatrix}}_{\vv{v_4}}\right\}$ verify that $S_1$ is an orthogonal set.
\begin{align*}
\vv{v_1}\cdot\vv{v_2}&=0 & \vv{v_2}\cdot\vv{v_3}&=0 & \vv{v_3}\cdot\vv{v_4}&=0 \\
\vv{v_1}\cdot\vv{v_3}&=0 & \vv{v_2}\cdot\vv{v_4}&=0 \\
\vv{v_1}\cdot\vv{v_4}&=0
\end{align*}
\end{example}
\begin{example}{Example: Is a Set Orthonormal?}
$S_2=\left\{\underbrace{\begin{bmatrix}1\\0\\0\end{bmatrix}}_{\vv{v_1}},\underbrace{\begin{bmatrix}0\\1/\sqrt{2}\\1/\sqrt{2}\end{bmatrix}}_{\vv{v_2}},\underbrace{\begin{bmatrix}0\\1/\sqrt{2}\\-1/\sqrt{2}\end{bmatrix}}_{\vv{v_3}}\right\}$ verify that $S_2$ is an orthonormal set.
\begin{align*}
\norm{\vv{v_1}}&=\sqrt{1+0+0}=\sqrt{1}=1 & \vv{v_1}\cdot\vv{v_2}&=0 \\
\norm{\vv{v_2}}&=\sqrt{0+\frac{1}{2}+\frac{1}{2}}=1 & \vv{v_1}\cdot\vv{v_3}&=0 \\
\norm{\vv{v_3}}&=\sqrt{0+\frac{1}{2}+\frac{1}{2}}=1 & \vv{v_2}\cdot\vv{v_3}&=0
\end{align*}
\end{example}
\begin{theorem}{Theorem: Relating Orthogonal Sets and Linear Independence}
Let $S=\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\right\}$ be a set of vectors.
\begin{enumerate}
\item {If $S$ is an \textbf{orthogonal} set of \textbf{nonzero} vectors, then the set $S$ is linearly independent.}
\item {If $S$ is an orthonormal set, then $S$ is linearly independent.}
\end{enumerate}
\end{theorem}
\textbf{Question:} If a set is linearly independent, must it also be orthogonal? \textbf{No}
\begin{tblr}{width=\linewidth, colspec={XX}, cells={halign=c,valign=m}}
{Linearly independent but not orthogonal.} &
{\raisebox{-.5\height}{\resizebox{0.5\linewidth}{!}{\begin{tikzpicture}
\draw[thin, latex-latex] (0,-2.5) -- (0,2.5);
\draw[thin, latex-latex] (-2.5,0) -- (2.5,0);
\draw[ultra thick,-latex] (0,0) -- (1,1) node[right] {$\begin{bmatrix}1\\1\end{bmatrix}$};
\draw[ultra thick,-latex] (0,0) -- (1,2) node[right] {$\begin{bmatrix}1\\2\end{bmatrix}$};
\fill (0,0) circle[radius=3pt];
\end{tikzpicture}}}}
\end{tblr}
\begin{example}{Example: Conclusions We Can Make When Columns Of A Matrix Are Orthogonal}
$A=\begin{bmatrix}1&1&1&-1\\1&1&-1&1\\1&-1&-1&-1\\1&-1&1&1\end{bmatrix}$ The columns of $A$ are orthogonal (shown in a previous example) and nonzero so the columns of $A$ are linearly independent.
\begin{itemize}
\item rank$A=4$
\item $A$ is invertible
\item {Is $\lambda=0$ an eigenvalue of $A$? \textbf{No}
$$\text{det}\(A\)\ne0$$}
\end{itemize}
\end{example}
\textbf{Recall:} Let $W$ be a subspace of $\mathbb{R}^n$. Then $\mathcal{B}=\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\right\}$ is a basis for $W$ if:
\begin{enumerate}
\item $W=\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\)$ ($\mathcal{B}$ spans $W$)
\item $\mathcal{B}$ is a linearly independent set
\end{enumerate}
\begin{definition}{Definition: Orthogonal and Orthonormal Basis}
Let $W$ be a subspace of $\mathbb{R}^n$. Let $S=\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\right\}$ be a set of vectors in $W$. Then:
\begin{enumerate}
\item {The set $S$ is an \textbf{orthogonal basis} for $W$ if $S$ is a basis for $W$ and $S$ is an orthogonal set.}
\item {The set $S$ is an \textbf{orthonormal basis} for $W$ if $S$ is a basis for $W$ and $S$ is an orthonormal set.}
\end{enumerate}
\end{definition}
\begin{example}{Example: Orthogonal Basis}
$S_1=\left\{\begin{bmatrix}1\\1\\1\\1\end{bmatrix},\begin{bmatrix}1\\1\\-1\\-1\end{bmatrix},\begin{bmatrix}1\\-1\\-1\\1\end{bmatrix},\begin{bmatrix}-1\\1\\-1\\1\end{bmatrix}\right\}$
\begin{itemize}
\item $S_1$ is \textbf{orthogonal}
\item $S_1$ is linearly independent
\item Any $4$ linearly independent vectors in $\mathbb{R}^4$ forms a \textbf{basis} for $\mathbb{R}^4$.
\end{itemize}
Thus $S_1$ is an \textbf{orthogonal basis} for $\mathbb{R}^4$.
\end{example}
\begin{example}{Example: Orthonormal Basis}
$S_2=\left\{\begin{bmatrix}1\\0\\0\end{bmatrix},\begin{bmatrix}0\\1/\sqrt{2}\\1/\sqrt{2}\end{bmatrix},\begin{bmatrix}0\\1/\sqrt{2}\\-1/\sqrt{2}\end{bmatrix}\right\}$
\begin{itemize}
\item $S_2$ is \textbf{orthonormal}
\item $S_2$ is linearly independent
\item Any 3 linearly independent vectors in $\mathbb{R}^3$ forms a \textbf{basis} for $\mathbb{R}^3$.
\end{itemize}
Thus $S_2$ is an \textbf{orthonormal basis} for $\mathbb{R}^3$.
\end{example}
Given a vector $\vv{x}$ in $\mathbb{R}^n$ and an \textbf{orthogonal basis} $\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_n}\right\}$ for $\mathbb{R}^n$. We have:
$$\vv{x}=c_1\vv{v_1}+c_2\vv{v_2}+\hdots+c_n\vv{v_n}\text{ for some }c_1,c_2,\hdots,c_n$$
consider $\vv{x}\cdot\vv{v_1}$:
\begin{align*}
\vv{x}\cdot\vv{v_1}&=\(c_1\vv{v_1}+c_2\vv{v_2}+\hdots+c_n\vv{v_n}\)\cdot\vv{v_1} \\
&=c_1\(\vv{v_1}\cdot\vv{v_1}\)+\underbrace{\cancel{c_2\(\vv{v_2}\cdot\vv{v_1}\)+\hdots+c_n\(\vv{v_n}\cdot\vv{v_1}\)}}_{0} \\
\vv{x}\cdot\vv{v_1}&=c_1\(\vv{v_1}\cdot\vv{v_1}\) \\
c_1&=\frac{\vv{x}\cdot\vv{v_1}}{\vv{v_1}\cdot\vv{v_1}}
\end{align*}
\begin{theorem}{Theorem: Writing A Vector In The Form Of A Sum Of Projection}
Let $W$ be a subspace of $\mathbb{R}^n$ and let $S=\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\right\}$ be an orthogonal basis for $W$. Then for every vector $\vv{x}$ in $W$, we have:
\begin{align*}
\vv{x}&=\frac{\vv{x}\cdot\vv{v_1}}{\vv{v_1}\cdot\vv{v_1}}\vv{v_1}+\frac{\vv{x}\cdot\vv{v_2}}{\vv{v_2}\cdot\vv{v_2}}\vv{v_2}+\hdots+\frac{\vv{x}\cdot\vv{v_k}}{\vv{v_k}\cdot\vv{v_k}}\vv{v_k} \\
\vv{x}&=\text{proj}_{\vv{v_1}}\vv{x}+\text{proj}_{\vv{v_2}}\vv{x}+\hdots+\text{proj}_{\vv{v_k}}\vv{x}
\end{align*}
\begin{itemize}
\item We can write $\vv{x}$ as a sum of projections.
\end{itemize}
\end{theorem}
\begin{example}{Example: Write A Vector As A Linear Combination Of Other Vectors}
Write $\vv{v}=\begin{bmatrix}1\\2\\3\\4\end{bmatrix}$ as a linear combination of $\vv{v_1}=\begin{bmatrix}1\\1\\1\\1\end{bmatrix}$, $\vv{v_2}=\begin{bmatrix}1\\1\\-1\\-1\end{bmatrix}$, $\vv{v_3}=\begin{bmatrix}1\\-1\\-1\\1\end{bmatrix}$, $\vv{v_4}=\begin{bmatrix}-1\\1\\-1\\1\end{bmatrix}$. \\ \newline
$\left\{\vv{v_1},\vv{v_2},\vv{v_3},\vv{v_4}\right\}$ is an orthogonal basis for $\mathbb{R}^4$ so:
\begin{align*}
c_1&=\frac{\vv{v}\cdot\vv{v_1}}{\vv{v_1}\cdot\vv{v_1}} & c_2&=\frac{\vv{v}\cdot\vv{v_2}}{\vv{v_2}\cdot\vv{v_2}} & c_3&=\frac{\vv{v}\cdot\vv{v_3}}{\vv{v_3}\cdot\vv{v_3}} & c_4&=\frac{\vv{v}\cdot\vv{v_4}}{\vv{v_4}\cdot\vv{v_4}} \\
&=\frac{10}{4} & &=\frac{-4}{4} & &=\frac{0}{4} & &=\frac{2}{4} \\
&=\frac{5}{2} & &=-1 & &=0 & &=\frac{1}{2}
\end{align*}
\fbox{$\displaystyle{\vv{v}=\frac{5}{2}\vv{v_1}-\vv{v_2}+0\vv{v_3}+\frac{1}{2}\vv{v_4}}$}
\end{example}
\newpage
\section{Section 5.2 Orthogonal Complements and Orthogonal Projections}
\begin{definition}{Definition: Orthogonal to A Subspace and Orthogonal Complement}
Let $W$ be a subspace of $\mathbb{R}^n$. Let $\vv{x}$ be a vector in $\mathbb{R}^n$. We say $\vv{x}$ is \textbf{orthogonal to $W$} if $\vv{x}$ is orthogonal to every vector in $W$. The set of all vectors orthogonal to $W$ is called the \textbf{orthogonal complement} of $W$, denoted $W^\perp$ ``$W$ perp''.
\end{definition}
\begin{example}{Example: Vector Orthogonal to Subspace}
Let $W=\text{span}\(\begin{bmatrix}1\\1\end{bmatrix}\)$. \\
\begin{tblr}{width=\linewidth,colspec={XX},cells={halign=c,valign=m}}
{$\begin{bmatrix}c\\c\end{bmatrix}\cdot\begin{bmatrix}1\\-1\end{bmatrix}=c-c=0$\\ \newline
$\begin{bmatrix}1\\-1\end{bmatrix}$ is orthogonal to $W$.\\ \newline
$\begin{bmatrix}c\\c\end{bmatrix}\cdot\begin{bmatrix}d\\-d\end{bmatrix}=cd-cd=0$\\ \newline
Then $W^\perp=\text{span}\(\begin{bmatrix}1\\-1\end{bmatrix}\)$} &
{\raisebox{-.5\height}{\resizebox{\linewidth}{!}{\begin{tikzpicture}
\draw[thin, latex-latex] (0,-2.5) -- (0,2.5) node[right] {$y$};
\draw[thin, latex-latex] (-2.5,0) -- (2.5,0) node[above] {$x$};
\draw[black, dotted, latex-latex, ultra thick] (-2.5,-2.5) -- (2.5,2.5) node[right] {$W$};
\draw[black, dotted, latex-latex, ultra thick] (2.5,-2.5) -- (-2.5,2.5) node[left] {$W^\perp$};
\draw[red, thick] (0.25,0.25) -- (0.5,0) -- (0.25,-0.25);
\draw[black, -latex, ultra thick] (0,0) -- (1,1);
\draw[black, -latex, ultra thick] (0,0) -- (1,-1);
\fill (0,0) circle[radius=3pt];
\end{tikzpicture}}}}
\end{tblr}
\end{example}
\textbf{Properties of $W^\perp$:}
\begin{itemize}
\item If $W$ is a subspace of $\mathbb{R}^n$, then $W^\perp$ is also a subspace of $\mathbb{R}^n$.
\item The only vector in both $W$ and $W^\perp$ is $\vv{0}$ $\(W\cap W^\perp=\left\{\vv{0}\right\}\)$.
\item $\(W^\perp\)^\perp=W$
\item {If $W=\text{span}\(\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\)$, then $\vv{v}$ is in $W^\perp$ if and only if $\vv{v}$ is orthogonal to $\vv{v_1},\vv{v_2},\hdots\text{ and }\vv{v_k}$.}
\end{itemize}
\begin{example}{Example: Seeing If A Vector Is In $W^\perp$}
Let $W=\text{span}\(\begin{bmatrix}1\\0\\0\end{bmatrix},\begin{bmatrix}0\\1\\1\end{bmatrix}\)$. Are $\vv{u}=\begin{bmatrix}0\\0\\1\end{bmatrix}$ and $\vv{v}=\begin{bmatrix}0\\1\\-1\end{bmatrix}$ in $W^\perp$?\\
\begin{tblr}{width=\linewidth,colspec={X|X},cells={valign=m,halign=c}}
{$\begin{aligned}
\vv{u}\cdot\begin{bmatrix}1\\0\\0\end{bmatrix}&=0\\
\vv{u}\cdot\begin{bmatrix}0\\1\\1\end{bmatrix}&=1\ne0
\end{aligned}$\\
$\vv{u}$ is \textbf{not} in $W^\perp$} &
{$\begin{aligned}
\vv{v}\cdot\begin{bmatrix}1\\0\\0\end{bmatrix}&=0\\
\vv{v}\cdot\begin{bmatrix}0\\1\\1\end{bmatrix}&=0
\end{aligned}$\\
$\vv{v}$ is \textbf{in} $W^\perp$}
\end{tblr}
Any vector in $W$ can be written in the form $c\begin{bmatrix}1\\0\\0\end{bmatrix}+d\begin{bmatrix}0\\1\\1\end{bmatrix}$ and
\begin{align*}
\vv{v}\cdot\(c\begin{bmatrix}1\\0\\0\end{bmatrix}+d\begin{bmatrix}0\\1\\1\end{bmatrix}\)&=c\(\vv{v}\cdot\begin{bmatrix}1\\0\\0\end{bmatrix}\)+d\(\vv{v}\cdot\begin{bmatrix}0\\1\\1\end{bmatrix}\) \\
&=c\(0\)+d\(0\)\\
&=0
\end{align*}
So $\vv{v}$ is orthogonal to every vector in $W$.
\end{example}
\begin{example}{Example: Finding the Orthogonal Complement}
Find the orthogonal complement of the subspace $S$ of $\mathbb{R}^4$ spanned by the columns of\\ $A=\begin{bmatrix}\vv{v_1}&\vv{v_2}\end{bmatrix}=\begin{bmatrix}1&0\\2&0\\1&0\\0&1\end{bmatrix}$.
$$S=\text{span}\(\vv{v_1},\vv{v_2}\)=\text{col}A$$
\begin{itemize}
\item {We want to find $\vv{x}$ such that $\vv{x}\cdot\vv{v_1}=0$ and $\vv{x}\cdot\vv{v_2}=0$. In other words:
$$\begin{bmatrix}x_1\\x_2\\x_3\\x_4\end{bmatrix}\cdot\begin{bmatrix}1\\2\\1\\0\end{bmatrix}=0\qquad\text{and}\qquad\begin{bmatrix}x_1\\x_2\\x_3\\x_4\end{bmatrix}\cdot\begin{bmatrix}0\\0\\0\\1\end{bmatrix}=0$$
When turned into a linear system of equations $\systeme{
x_1+2x_2+x_3=0,
x_4=0}$ you get a homogeneous linear system. Which we can solve:
$$\begin{bmatrix}1&2&1&0\\0&0&0&1\end{bmatrix}\begin{bmatrix}x_1\\x_2\\x_3\\x_4\end{bmatrix}=\begin{bmatrix}0\\0\end{bmatrix}\qquad\begin{aligned}x_1&=-2x_2-x_3\\x_4&=0\end{aligned}$$
$\vv{x}=\begin{bmatrix}-2s-t\\s\\t\\0\end{bmatrix}=s\begin{bmatrix}-2\\1\\0\\0\end{bmatrix}+t\begin{bmatrix}-1\\0\\1\\0\end{bmatrix}$.}
\item {Find $S^\perp$ (the set containing all vectors orthogonal to $S$)
$$\begin{aligned}
S^\perp&=\text{span}\(\begin{bmatrix}-2\\1\\0\\0\end{bmatrix},\begin{bmatrix}-1\\0\\1\\0\end{bmatrix}\)=\text{null}\(A^T\)\\
S&=\text{col}A
\end{aligned}$$}
\end{itemize}
\end{example}
\begin{theorem}{Theorem: Relating null And col Using Orthogonal Complements}
Let $A$ be an $m\times n$ matrix. Then $\(\text{col}A\)^\perp=\text{null}\(A^T\)$.
\end{theorem}
\begin{example}{Example: Finding A Basis For $W^\perp$}
Let $W=\text{span}\(\begin{bmatrix}1\\1\\0\end{bmatrix},\begin{bmatrix}0\\0\\1\end{bmatrix}\)$. Find a basis for $W^\perp$ and calculate dim$\(W^\perp\)$.
$$\begin{aligned}W&=\text{col}A\\W^\perp&=\(\text{col}A\)^\perp=\text{null}\(A^T\)\end{aligned}\qquad\text{where}\qquad A=\begin{bmatrix}1&0\\1&0\\0&1\end{bmatrix}$$
With $A^T=\begin{bmatrix}1&1&0\\0&0&1\end{bmatrix}$ then the system of equations is:
\begin{align*}
x_1+x_2&=0 & x_1&=-x_2 \\
x_3&=0 & x_3&=0
\end{align*}
$\vv{x}=\begin{bmatrix}-t\\t\\0\end{bmatrix}=t\begin{bmatrix}-1\\1\\0\end{bmatrix}$, thus $W^\perp=\text{span}\(\begin{bmatrix}-1\\1\\0\end{bmatrix}\)$ making the bases:
\begin{align*}
\mathcal{B}_W&=\left\{\begin{bmatrix}1\\1\\0\end{bmatrix},\begin{bmatrix}0\\0\\1\end{bmatrix}\right\} & \text{dim}\(W\)&=2 \\
\mathcal{B}_{W^\perp}&=\left\{\begin{bmatrix}-1\\1\\0\end{bmatrix}\right\} & \text{dim}\(W^\perp\)&=1
\end{align*}
$\text{dim}W+\text{dim}\(W^\perp\)=2+1=3=\text{dim}\(\mathbb{R}^3\)$
\end{example}
\begin{theorem}{Theorem: Dimensions Sum To $n$}
Let $W$ be a subspace of $\mathbb{R}^n$. Then
$$\text{dim}W+\text{dim}\(W^\perp\)=n$$
\end{theorem}
\begin{example}{Example: Graphical Representation Of $W^\perp$}
\begin{tblr}{width=\linewidth,colspec={XX},cells={halign=c,valign=m}}
{$\begin{aligned}
W&=\text{span}\(\begin{bmatrix}1\\1\\0\end{bmatrix},\begin{bmatrix}0\\0\\1\end{bmatrix}\) \\
W^\perp&=\text{span}\(\begin{bmatrix}-1\\1\\0\end{bmatrix}\)
\end{aligned}$} &
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis}[
width = \linewidth,
ymin = -1.5, ymax = 1.5,
xmin = -1.5, xmax = 1.5,
zmin = -1.5, zmax = 1.5,
axis line style = {ultra thick},
xlabel = {$x$},
ylabel = {$y$},
zlabel = {$z$},
clip = false,
3d box=background,
grid=major,
]
\fill[red, fill=red, draw = red, fill opacity=0.2, ultra thick] (-1.5,-1.5,1.5) -- (1.5,1.5,1.5) node[below left, black, fill opacity=1] {$W$} -- (1.5,1.5,-1.5) -- (-1.5,-1.5,-1.5) -- cycle;
\draw[thick, blue] (0,0,0.5) -- (0.5,-0.5,0.5) -- (0.5,-0.5,0);
\addplot3[mark=*, ultra thick] coordinates {(0,0,0)};
\draw[black, ultra thick, -latex] (0,0,0) -- (1,1,0);
\draw[black, ultra thick, -latex] (0,0,0) -- (0,0,1);
\draw[black, ultra thick, latex-latex, dotted] (-1.5,1.5,0) -- (1.5,-1.5,0) node[left, fill opacity=1] {$W^\perp$};
\draw[black, ultra thick, -latex] (0,0,0) -- (-1,1,0);
\end{axis}
\end{tikzpicture}}}
\end{tblr}
\end{example}
\subsection{Orthogonal Projections}
\textbf{Recall:} For a vector $\vv{v}$ in $\mathbb{R}^n$ and a nonzero vector $\vv{u}$ in $\mathbb{R}^n$, $\text{proj}_{\vv{u}}\vv{v}=\frac{\vv{v}\cdot\vv{u}}{\vv{u}\cdot\vv{u}}\vv{u}$.
\begin{center}
\resizebox{0.5\linewidth}{!}{\begin{tikzpicture}
\draw[thick, blue] (1.25,0) -- (1.25, 0.25) -- (1.5, 0.25);
\draw[ultra thick, dotted, latex-latex] (-2,0) -- (2.5,0) node[right] {span$\(\vv{u}\)$};
\draw[ultra thick, red, -latex] (0,0) -- (1.5,0) node[below, pos=0.5] {$\text{proj}_{\vv{u}}\vv{v}$};
\draw[ultra thick, -latex] (0,0) -- (1,0) node[above] {$\vv{u}$};
\draw[ultra thick, -latex] (0,0) -- (1.5,1) node[above] {$\vv{v}$};
\draw[ultra thick, dotted] (1.5,1) -- (1.5,0);
\fill (0,0) circle[radius=3pt];
\fill (1.5,1) circle[radius=3pt];
\fill (1.5,0) circle[radius=3pt];
\end{tikzpicture}}
\end{center}
We project $\vv{v}$ onto a one-dimensional subspace when we project $\vv{v}$ onto $\vv{u}$.
\begin{definition}{Definition: Orthogonal Projection}
Let $W$ be a subspace of $\mathbb{R}^n$. Let $\vv{v}$ be a vector in $\mathbb{R}^n$. Let $\left\{\vv{u_1},\vv{u_2},\hdots,\vv{u_k}\right\}$ be an \textbf{orthogonal basis for $W$}. Then the \textbf{orthogonal projection of $\vv{v}$ onto $W$} is
\begin{align*}
\text{proj}_W\vv{v}&=\frac{\vv{v}\cdot\vv{u_1}}{\vv{u_1}\cdot\vv{u_1}}\vv{u_1}+\frac{\vv{v}\cdot\vv{u_2}}{\vv{u_2}\cdot\vv{u_2}}\vv{u_2}+\hdots+\frac{\vv{v}\cdot\vv{u_k}}{\vv{u_k}\cdot\vv{u_k}}\vv{u_k} \\
\text{proj}_W\vv{v}&=\text{proj}_{\vv{u_1}}\vv{v}+\text{proj}_{\vv{u_2}}\vv{v}+\hdots+\text{proj}_{\vv{u_k}}\vv{v}
\end{align*}
\end{definition}
\textbf{The component of $\vv{v}$ orthogonal to $W$} is the vector $\text{perp}_W\vv{v}=\vv{v}-\text{proj}_W\vv{v}$.
\begin{tblr}{width=\linewidth,colspec={XX},cells={halign=c,valign=m}}
{$\vv{v}=\underbrace{\text{proj}_W\vv{v}}_{\text{in }W}+\underbrace{\text{perp}_W\vv{v}}_{\text{in }W^\perp}$} &
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis}[
width = \linewidth,
ymin = -2, ymax = 2,
xmin = -2, xmax = 2,
zmin = -2, zmax = 2,
axis line style = {ultra thick},
xlabel = {$x$},
ylabel = {$y$},
zlabel = {$z$},
clip = false,
3d box=background,
ticks = none,
grid=major,
]
\fill[red, fill=red, draw = red, fill opacity=0.2, ultra thick] (-2,-2,0) -- (2,-2,0) -- (2,2,0) node[above left, red, fill opacity=1] {$W$} -- (-2,2,0) -- cycle;
\draw[black, thick] (-0.5,0,0) -- (-0.5,0,0.5) --(0,0,0.5);
\draw[dotted, black, ultra thick, latex-latex] (0,0,-2) -- (0,0,2) node[left, pos = 0] {$W^\perp$};
\draw[blue, -latex, ultra thick] (0,0,0) -- (0,0,1) node[left] {$\text{perp}_W\vv{v}$};
\draw[blue, -latex, ultra thick] (0,0,0) -- (1,1,0) node[below, pos=0.5] {$\text{proj}_W\vv{v}$};
\draw[black, -latex, ultra thick] (0,0,0) -- (1,1,1) node[right] {$\vv{v}$};
\draw[dotted, black, ultra thick] (1,1,1) -- (1,1,0);
\draw[dotted, black, ultra thick] (0,0,1) -- (1,1,1);
\addplot3[mark=*, ultra thick] coordinates {(0,0,0)} node[left] {$\vv{0}$};
\end{axis}
\end{tikzpicture}}}
\end{tblr}
\subsection{Orthogonal Decomposition Theorem}
\begin{theorem}{Theorem: Orthogonal Decomposition Theorem}
Let $W$ be a subspace of $\mathbb{R}^n$ and let $\vv{v}$ be a vector in $\mathbb{R}^n$. Then there are unique vectors $\vv{a}$ in $W$ and $\vv{b}$ in $W^\perp$ such that $\vv{v}=\vv{a}+\vv{b}$. More specifically,
$$\vv{v}=\text{proj}_W\vv{v}+\text{perp}_{W}\vv{v}$$
\end{theorem}
\begin{example}{Example: Finding $\vv{a}$ and $\vv{b}$}
Let $W=\text{span}\(\begin{bmatrix}2\\0\\1\end{bmatrix},\begin{bmatrix}-1\\1\\2\end{bmatrix}\)$ and $\vv{v}=\begin{bmatrix}1\\2\\0\end{bmatrix}$. Find a vector $\vv{a}$ in $W$ and a vector $\vv{b}$ in $W^\perp$ such that $\vv{v}=\vv{a}+\vv{b}$.
$$\mathcal{B}_W=\left\{\underbrace{\begin{bmatrix}2\\0\\1\end{bmatrix}}_{\vv{v_1}},\underbrace{\begin{bmatrix}-1\\1\\2\end{bmatrix}}_{\vv{v_2}}\right\}$$
As $\mathcal{B}_W$ is an \textbf{orthogonal} basis for $W$, we can find the orthogonal projection.
\begin{align*}
\vv{a}&=\text{proj}_{W}\vv{v} & \vv{b}&=\text{perp}_W\vv{v} \\
&=\frac{\vv{v}\cdot\vv{v_1}}{\vv{v_1}\cdot\vv{v_1}}\vv{v_1}+\frac{\vv{v}\cdot\vv{v_2}}{\vv{v_2}\cdot\vv{v_2}}\vv{v_2} & &=\vv{v}-\text{proj}_W\vv{v} \\
&=\frac{2}{5}\begin{bmatrix}2\\0\\1\end{bmatrix}+\frac{1}{6}\begin{bmatrix}-1\\1\\2\end{bmatrix} & &=\begin{bmatrix}1\\2\\0\end{bmatrix}-\frac{1}{30}\begin{bmatrix}19\\5\\22\end{bmatrix} \\
&=\frac{12}{30}\begin{bmatrix}2\\0\\1\end{bmatrix}+\frac{5}{30}\begin{bmatrix}-1\\1\\2\end{bmatrix} & &=\frac{1}{30}\begin{bmatrix}11\\55\\-22\end{bmatrix}\in W^\perp \\
&=\frac{1}{30}\begin{bmatrix}19\\5\\22\end{bmatrix}\in W
\end{align*}
Now we can write $\vv{v}$ as:
$$\vv{v}=\underbrace{\frac{1}{30}\begin{bmatrix}19\\5\\22\end{bmatrix}}_{\text{in }W}+\underbrace{\frac{1}{30}\begin{bmatrix}11\\55\\-22\end{bmatrix}}_{\text{in }W^\perp}$$
\end{example}
The orthogonal Projection of $\vv{v}$ in $\mathbb{R}^n$ onto a subspace $W$ of $\mathbb{R}^n$ is the best approximation of the vector $\vv{v}$ by vectors in $W$. ($\text{proj}_W\vv{v}$ is the vector in $W$ that is closest to $\vv{v}$)
\begin{tblr}{width=\linewidth,colspec={XX},cells={halign=c,valign=m}}
{Distance between $\vv{v}$ and $W$ is $\norm{\text{perp}_W\vv{v}}$} &
{\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis}[
width = \linewidth,
ymin = -2, ymax = 2,
xmin = -2, xmax = 2,
zmin = -2, zmax = 2,
axis line style = {ultra thick},
xlabel = {$x$},
ylabel = {$y$},
zlabel = {$z$},
clip = false,
3d box=background,
ticks = none,
grid=major,
]
\fill[red, fill=red, draw = red, fill opacity=0.2, ultra thick] (-2,-2,0) -- (2,-2,0) -- (2,2,0) node[above left, red, fill opacity=1] {$W$} -- (-2,2,0) -- cycle;
\draw[black, thick] (-0.5,0,0) -- (-0.5,0,0.5) --(0,0,0.5);
\draw[dotted, black, ultra thick, latex-latex] (0,0,-2) -- (0,0,2) node[left, pos = 0] {$W^\perp$};
\draw[blue, -latex, ultra thick] (0,0,0) -- (0,0,1) node[left] {$\text{perp}_W\vv{v}$};
\draw[blue, -latex, ultra thick] (0,0,0) -- (1,1,0) node[below, pos=0.5] {$\text{proj}_W\vv{v}$};
\draw[black, -latex, ultra thick] (0,0,0) -- (1,1,1) node[right] {$\vv{v}$};
\draw[dotted, black, ultra thick] (1,1,1) -- (1,1,0);
\addplot3[mark=*, ultra thick] coordinates {(0,0,0)} node[left] {$\vv{0}$};
\end{axis}
\end{tikzpicture}}}
\end{tblr}
\begin{example}{Example: Find The Closest Approximation}
Let $A=\begin{bmatrix}1&1&0\\0&1&1\\1&-1&0\\0&-1&1\end{bmatrix}$ and $\vv{b}=\begin{bmatrix}3\\-3\\-2\\4\end{bmatrix}$ show that $A\vv{x}=\vv{b}$ is inconsistent. Then find the unique vector in $\text{col}A$ that is closest to $\vv{b}$. \\ \newline
Row Reduce:
\begin{align*}
\begin{bmatrix}A&\aug&\vv{b}\end{bmatrix}=\begin{bmatrix}1&1&0&\aug&3\\0&1&1&\aug&-3\\1&-1&0&\aug&-2\\0&-1&1&\aug&4\end{bmatrix}\xrightarrow{R_3-R_1}&\begin{bmatrix}1&1&0&\aug&3\\0&1&1&\aug&-3\\0&-2&0&\aug&-5\\0&-1&1&\aug&4\end{bmatrix}\\
\xrightarrow[R_4+R_2]{R_3+2R_2}&\begin{bmatrix}1&1&0&\aug&3\\0&1&1&\aug&-3\\0&0&2&\aug&-11\\0&0&2&\aug&1\end{bmatrix}\\
\xrightarrow{R_4-R_3}&\begin{bmatrix}1&1&0&\aug&3\\0&1&1&\aug&-3\\0&0&2&\aug&-11\\0&0&0&\aug&12\end{bmatrix}
\end{align*}
As $0\ne12$ then no solution exists, making $A\vv{x}=\vv{b}$ inconsistent so $\vv{b}$ is \textbf{not} in col$A$. However the vector in col$A$ closest to $\vv{b}$ is $\text{proj}_{\text{col}A}\vv{b}$.
$$\mathcal{B}_{\text{col}A}=\left\{\underbrace{\begin{bmatrix}1\\0\\1\\0\end{bmatrix}}_{\vv{v_1}},\underbrace{\begin{bmatrix}1\\1\\-1\\-1\end{bmatrix}}_{\vv{v_2}},\underbrace{\begin{bmatrix}0\\1\\0\\1\end{bmatrix}}_{\vv{v_3}}\right\}$$
As $\mathcal{B}_{\text{col}A}$ is an orthogonal basis for col$A$ (linearly independent and span col$A$) then
$$\text{proj}_{\text{col}A}\vv{b}=\text{proj}_{\vv{v_1}}\vv{b}+\text{proj}_{\vv{v_2}}\vv{b}+\text{proj}_{\vv{v_3}}\vv{b}=\begin{bmatrix}0\\0\\1\\1\end{bmatrix}$$
\end{example}
\textbf{Note:} If $\vv{b}$ is in col$A$, then $\text{proj}_{\text{col}A}\vv{b}=\vv{b}$.
\newpage
\section{Section 5.3 The Gram-Schmidt Process}
\begin{example}{Example: Converting A Basis To An Orthogonal Basis}
Let $W=\text{span}\(\vv{v_1},\vv{v_2}\)$ where $\vv{v_1}=\begin{bmatrix}1\\0\\1\end{bmatrix}$ and $\vv{v_2}=\begin{bmatrix}1\\1\\0\end{bmatrix}$. Let $\vv{x}=\begin{bmatrix}1\\2\\3\end{bmatrix}$. Find $\text{proj}_W\vv{x}$. \\ \newline
$\left\{\begin{bmatrix}1\\0\\1\end{bmatrix},\begin{bmatrix}1\\1\\0\end{bmatrix}\right\}$ is a basis for $W$ (linearly independent set that spans $W$) but not an orthogonal basis $\(\vv{v_1}\cdot\vv{v_2}=1\ne0\)$. Thus we need to create an orthogonal basis for $W$ before we can find $\text{proj}_W\vv{x}$. \\
\begin{tblr}{width=\linewidth,colspec={XX},cells={halign=c,valign=m}}
{$\vv{v_1}=\begin{bmatrix}1\\0\\1\end{bmatrix}\qquad\vv{v_2}=\begin{bmatrix}1\\1\\0\end{bmatrix}$\\ \newline
$\begin{aligned}
\vv{W_1}&=\vv{v_1}=\begin{bmatrix}1\\0\\1\end{bmatrix} \\
\vv{W_2}&=\vv{v_2}-\text{proj}_{\vv{W_1}}\vv{v_2} \\
&=\vv{v_2}-\frac{\vv{v_2}\cdot \vv{W_1}}{\vv{W_1}\cdot\vv{W_1}}\vv{W_1} \\
&=\begin{bmatrix}1\\1\\0\end{bmatrix}-\frac{1}{2}\begin{bmatrix}1\\0\\1\end{bmatrix} \\
\vv{W_2}&=\begin{bmatrix}1/2\\1\\-1/2\end{bmatrix}
\end{aligned}$} &
{\raisebox{-.5\height}{\resizebox{\linewidth}{!}{\begin{tikzpicture}
\draw[red, thick] (0,0.25) -- (0.25,0.25) -- (0.25, 0);
\draw[dotted, latex-latex, black, ultra thick] (-2,0) -- (2,0);
\draw[dotted, latex-latex, black, ultra thick] (0,-2) -- (0,2);
\draw[ultra thick, black, -latex] (0,0) -- (1,0) node[above] {$\vv{v_1}$};
\draw[ultra thick, black, -latex] (0,0) -- (-1,1) node[left] {$\vv{v_2}$};
\draw[ultra thick, blue, -latex] (0,0) -- (0,1) node[right] {$\text{perp}_{\vv{v_1}}\vv{v_2}=\vv{v_2}-\text{proj}_{\vv{v_1}}\vv{v_2}$};
\draw[ultra thick, blue, -latex] (0,0) -- (-1,0) node[below] {$\text{proj}_{\vv{v_1}}\vv{v_2}$};
\draw[ultra thick, black, dotted] (-1,0) -- (-1,1);
\draw[ultra thick, black, dotted] (-1,1) -- (0,1);
\fill (0,0) circle[radius=3pt];
\end{tikzpicture}}}}
\end{tblr}
An orthogonal basis for $W$ is $\left\{\begin{bmatrix}1\\0\\1\end{bmatrix},\begin{bmatrix}1/2\\1\\-1/2\end{bmatrix}\right\}$, another orthogonal basis for $W$ is $\left\{\begin{bmatrix}1\\0\\1\end{bmatrix},\begin{bmatrix}1\\2\\-1\end{bmatrix}\right\}$.
\begin{align*}
\text{proj}_W\vv{x}&=\frac{1+0+3}{1+0+1}\begin{bmatrix}1\\0\\1\end{bmatrix}+\frac{1+4-3}{1+4+1}\begin{bmatrix}1\\2\\-1\end{bmatrix} \\
\Aboxed{\text{proj}_W\vv{x}&=\begin{bmatrix}7/3\\2/3\\5/3\end{bmatrix}=\frac{1}{3}\begin{bmatrix}7\\2\\5\end{bmatrix}}
\end{align*}
\end{example}
\subsection{The Gram-Schmidt Process}
Let $W$ be a $k$-dimensional subspace of $\mathbb{R}^n$ and let $\mathcal{B}=\left\{\vv{x_1},\vv{x_2},\hdots,\vv{x_k}\right\}$ be a basis for $W$. Define the vectors $\vv{v_1},\vv{v_2},\hdots,\vv{v_k}$ as follows:
\begin{align*}
\vv{v_1}&=\vv{x_1} \\
\vv{v_2}&=\vv{x_2}-\text{proj}_{\vv{v_1}}\vv{x_2}=\vv{x_2}-\frac{\vv{x_2}\cdot\vv{v_1}}{\vv{v_1}\cdot\vv{v_1}}\vv{v_1} \\
\vv{v_3}&=\vv{x_3}-\text{proj}_{\vv{v_1}}\vv{x_3}-\text{proj}_{\vv{v_2}}\vv{x_3} \\
&\vdots \\
\vv{v_k}&=\vv{x_k}-\text{proj}_{\vv{v_1}}\vv{x_k}-\text{proj}_{\vv{v_2}}\vv{x_k}-\hdots-\text{proj}_{\vv{v}_{k-1}}\vv{x_k}
\end{align*}
Then $\left\{\vv{v_1},\vv{v_2},\hdots,\vv{v_k}\right\}$ is an orthogonal basis for $W$.
\begin{example}{Example: Gram-Schmidt Process With A $2\times 4$ Matrix}
Find an orthogonal basis for null$A$ if $A=\begin{bmatrix}1&0&1&1\\0&1&0&1\end{bmatrix}$. \\ \newline
Find a basis for null$A$: solve $A\vv{x}=\vv{0}$
$$\begin{aligned}
x_1+x_3+x_4&=0 &\qquad x_1&=-t-s \\
x_2+x_4&=0 &\qquad x_2&=-t \\
&&\qquad x_3&=s\\
&&\qquad x_4&=t
\end{aligned}\qquad\vv{x}=\begin{bmatrix}-s-t\\-t\\s\\t\end{bmatrix}=t\begin{bmatrix}-1\\-1\\0\\1\end{bmatrix}+s\begin{bmatrix}-1\\0\\1\\0\end{bmatrix}$$
$\mathcal{B}_{\text{null}A}=\left\{\underbrace{\begin{bmatrix}-1\\0\\1\\0\end{bmatrix}}_{\vv{x_1}},\underbrace{\begin{bmatrix}-1\\-1\\0\\1\end{bmatrix}}_{\vv{x_2}}\right\}$
\begin{align*}
\vv{v_1}&=\vv{x_1} & \vv{v_2}&=\vv{x_2}-\text{proj}_{\vv{v_1}}\vv{x_2} \\
&=\begin{bmatrix}-1\\0\\1\\0\end{bmatrix} & &=\begin{bmatrix}-1\\-1\\0\\1\end{bmatrix}-\frac{1}{2}\begin{bmatrix}-1\\0\\1\\0\end{bmatrix} \\
&&&=\begin{bmatrix}-1/2\\-1\\-1/2\\1\end{bmatrix}
\end{align*}
$\left\{\begin{bmatrix}-1\\0\\1\\0\end{bmatrix},\begin{bmatrix}-1/2\\-1\\-1/2\\1\end{bmatrix}\right\}$ is an orthogonal basis for null$A$.
\end{example}
\begin{example}{Example: Gram-Schmidt Process With A $4\times3$ Matrix}
Find an orthogonal basis for the column space of $A$ where $A=\begin{bmatrix}3&-5&1\\1&1&1\\-1&5&-2\\3&-7&8\end{bmatrix}$. (Hint: rank$A=3$) \\ \newline
rank$A=3$ so the columns of $A$ are linearly independent (rank$A=\text{ \# of columns in }A$) and the columns of $A$ also span col$A$ (by definition of column space) so a basis for col$A$ is $\mathcal{B}_{\text{col}A}=\left\{\underbrace{\begin{bmatrix}3\\1\\-1\\3\end{bmatrix}}_{\vv{x_1}},\underbrace{\begin{bmatrix}-5\\1\\5\\-7\end{bmatrix}}_{\vv{x_2}},\underbrace{\begin{bmatrix}1\\1\\-2\\8\end{bmatrix}}_{\vv{x_3}}\right\}$. Now finding an orthogonal basis:
\begin{align*}
\vv{v_1}&=\vv{x_1} & \vv{v_2}&=\vv{x_2}-\frac{\vv{x_2}\cdot\vv{v_1}}{\vv{v_1}\cdot\vv{v_1}}\vv{v_1} & \vv{v_3}&=\vv{x_3}-\frac{\vv{x_3}\cdot\vv{v_1}}{\vv{v_1}\cdot\vv{v_1}}\vv{v_1}-\frac{\vv{x_3}\cdot\vv{v_2}}{\vv{v_2}\cdot\vv{v_2}}\vv{v_2} \\
&=\begin{bmatrix}3\\1\\-1\\3\end{bmatrix}& &=\begin{bmatrix}1\\3\\3\\-1\end{bmatrix} & &=\vv{x_3}-\frac{30}{20}\vv{v_1}-\frac{-10}{20}\vv{v_2} \\
&&&&&=\begin{bmatrix}-3\\1\\1\\3\end{bmatrix}
\end{align*}
$\left\{\begin{bmatrix}3\\1\\-1\\3\end{bmatrix},\begin{bmatrix}1\\3\\3\\-1\end{bmatrix},\begin{bmatrix}-3\\1\\1\\3\end{bmatrix}\right\}$ is an orthogonal basis for col$A$.
\end{example}
\newpage
\section{Section 7.3: Least Squares Approximation}
What if $A\vv{x}=\vv{b}$ is inconsistent?
\begin{itemize}
\item Then there is no vector $\vv{x}$ such that $A\vv{x}=\vv{b}$.
\item Therefore $\vv{b}$ is \textbf{not} a linear combination of the columns of $A$.
\item Thus $\vv{b}$ is \textbf{not} in col$\(A\)$.
\end{itemize}
Can we find a vector $\hat{x}$ such that $\underbrace{A\hat{x}}_{\text{proj}_{\text{col}A}\vv{b}}$ is as close to $\vv{b}$ as possible?
\begin{center}
\begin{tikzpicture}
\begin{axis}[
width = 0.5\linewidth,
ymin = -2, ymax = 2,
xmin = -2, xmax = 2,
zmin = -2, zmax = 2,
axis line style = {ultra thick},
xlabel = {$x$},
ylabel = {$y$},
zlabel = {$z$},
clip = false,
3d box=background,
ticks = none,
grid=major,
]
\fill[red, fill=red, draw = red, fill opacity=0.2, ultra thick] (-2,-2,0) -- (2,-2,0) -- (2,2,0) node[above left, red, fill opacity=1] {col$A$} -- (-2,2,0) -- cycle;
\draw[dotted, black, ultra thick, latex-latex] (0,0,-2) -- (0,0,2) node[left, pos = 0] {$\(\text{col}A\)^\perp$};
\draw[blue, -latex, ultra thick] (0,0,0) -- (1,1,0) node[below, pos=0.5] {$\text{proj}_{\text{col}A}\vv{b}$};
\draw[black, -latex, ultra thick] (0,0,0) -- (1,1,1) node[right] {$\vv{b}$};
\draw[dotted, black, ultra thick] (1,1,1) -- (1,1,0);
\addplot3[mark=*, ultra thick] coordinates {(0,0,0)} node[left] {$\vv{0}$};
\addplot3[mark=*, ultra thick] coordinates {(1,1,1)};
\addplot3[mark=*, ultra thick] coordinates {(1,1,0)};
\end{axis}
\end{tikzpicture}
\end{center}
$\text{proj}_{\text{col}A}\vv{b}$ is the vector in col$A$ closest to $\vv{b}$. So we want $A\vv{x}=\text{proj}_{\text{col}A}\vv{b}$.
\begin{theorem}{Theorem: The Best Approximation Theorem}
Let $W$ be a subspace of $\mathbb{R}^n$ and let $\vv{y}$ be a vector in $\mathbb{R}^n$. Then $\text{proj}_W\vv{y}$ is the closest point in $W$ to $\vv{y}$:
$$\norm{\vv{y}-\text{proj}_W\vv{y}}<\norm{\vv{y}-\vv{v}}$$
for all vectors $\vv{v}$ in $W$ where $\vv{v}\ne\text{proj}_W\vv{y}$. \\ \newline
\textbf{Note:} $\norm{\vv{y}-\text{proj}_W\vv{y}}=\norm{\text{perp}_W\vv{y}}$ is the \textbf{distance} between $\vv{y}$ and $W$. \\ \newline
$\text{proj}_W\vv{y}$ is the \textbf{best approximation} of $\vv{y}$ in $W$.
\end{theorem}
\begin{definition}{Definition: Least Squares Solution}
Let $A$ be an $m\times n$ matrix and let $\vv{b}$ be a vector in $\mathbb{R}^m$. Then a \textbf{least squares solution} of $A\vv{x}=\vv{b}$ is a vector $\hat{x}$ such that
$$\norm{\vv{b}-A\hat{x}}\le\norm{\vv{b}-A\vv{x}}$$
for all $\vv{x}$ in $\mathbb{R}^n$.
\begin{itemize}
\item To get as close to $\vv{b}$ as possible while remaining in col$A$, we use $\text{proj}_{\text{col}A}\vv{b}$.
\item We want $A\hat{x}=\text{proj}_{\text{col}A}\vv{b}$
\item To solve for $\hat{x}$, we want to solve $A\vv{x}=\text{proj}_{\text{col}A}\vv{b}$.
\item The \textbf{error} in this approximation is $\norm{\vv{b}-A\hat{x}}$. Where $\vv{b}$ is the actual and $A\hat{x}$ is the approximation.
\end{itemize}
\end{definition}
\begin{example}{Example: Finding A Least Squared Solution The Long Way}
Let $A=\begin{bmatrix}1&0&0&1\\0&1&1&0\\1&0&0&1\end{bmatrix}$ and $\vv{b}=\begin{bmatrix}1\\2\\3\end{bmatrix}$. Find a least squares solution to $A\vv{x}=\vv{b}$. \\ \newline
\textbf{Note:} $A\vv{x}=\vv{b}$ is inconsistent. \\ \newline
We need to calculate $\text{proj}_{\text{col}A}\vv{b}$:
\begin{enumerate}
\item {Find a basis for col$A$. If this basis is not an orthogonal basis, use Gram-Schmidt Process to create an orthogonal basis.}
\item {Use this basis and the projection formula to calculate $\text{proj}_{\text{col}A}\vv{b}$.
$$\text{proj}_{\text{col}A}\vv{b}=\begin{bmatrix}2\\2\\2\end{bmatrix}$$
We now need to solve $A\vv{x}=\text{proj}_{\text{col}A}\vv{b}$:
$$\begin{bmatrix}1&0&0&1\\0&1&1&0\\1&0&0&1\end{bmatrix}\vv{x}=\begin{bmatrix}2\\2\\2\end{bmatrix}$$
The least square solution set contains vectors of the form \fbox{$\hat{x}=\begin{bmatrix}2-t\\2-s\\s\\t\end{bmatrix}$} thus there are infinitely many solutions.}
\end{enumerate}
\end{example}
Consider $\vv{b}-A\hat{x}$ (where $\hat{x}$ is a least square solution to $A\vv{x}=\vv{b}$)
\begin{align*}
\vv{b}-A\hat{x}&=\vv{b}-\text{proj}_{\text{col}A}\vv{b} \\
&=\text{perp}_{\text{col}A}\vv{b}\in\(\text{col}A\)^\perp=\text{null}\(A^T\)
\end{align*}
So $\vv{b}-A\hat{x}$ is a vector in null$\(A^T\)=\left\{\vv{y}\left|A^T\vv{y}=\vv{0}\right.\right\}$. Therefore:
\begin{align*}
A^T\(\vv{b}-A\hat{x}\)&=\vv{0} \\
A^T\vv{b}-A^TA\hat{x}&=\vv{0} \\
A^T\vv{b}&=A^TA\hat{x}
\end{align*}
\begin{theorem}{Theorem: Easier Way Of Calculating A Least Squared Solution}
Let $A$ be an $m\times n$ matrix and let $\vv{b}$ be a vector in $\mathbb{R}^m$. The vector $\hat{x}$ in $\mathbb{R}^n$ is a least squares solution to $A\vv{x}=\vv{b}$ if and only if $\hat{x}$ is a solution to the normal equations
$$A^TA\vv{x}=A^T\vv{b}.$$
\end{theorem}
\begin{example}{Example: Finding A Least Squared Solution The Quicker Way}
Let $A=\begin{bmatrix}1&1\\0&1\\1&1\\0&1\end{bmatrix}$ and $\vv{b}=\begin{bmatrix}1\\1\\1\\3\end{bmatrix}$.
\begin{enumerate}
\item {Find all least squares solutions to $A\vv{x}=\vv{b}$.
\begin{align*}
A^TA&=\begin{bmatrix}1&0&1&0\\1&1&1&1\end{bmatrix}\begin{bmatrix}1&1\\0&1\\1&1\\0&1\end{bmatrix}=\begin{bmatrix}2&2\\2&4\end{bmatrix} & A^T&=\begin{bmatrix}1&0&1&0\\1&1&1&1\end{bmatrix} \\
A^T\vv{b}&=\begin{bmatrix}1&0&1&0\\1&1&1&1\end{bmatrix}\begin{bmatrix}1\\1\\1\\3\end{bmatrix}=\begin{bmatrix}2\\6\end{bmatrix}
\end{align*}
Solve $\begin{bmatrix}2&2\\2&4\end{bmatrix}\vv{x}=\begin{bmatrix}2\\6\end{bmatrix}$:
\begin{enumerate}
\item{\textbf{Way 1: Row Reduction}
$$\begin{bmatrix}2&2&\aug&2\\2&4&\aug&6\end{bmatrix}\xrightarrow{R_2-R_1}\begin{bmatrix}2&2&\aug&2\\0&2&\aug&4\end{bmatrix}\xrightarrow[\frac{1}{2}R_2]{\frac{1}{2}R_1}\begin{bmatrix}1&1&\aug&1\\0&1&\aug&2\end{bmatrix}$$
\begin{align*}
x_1+x_2&=1 & x_1&=-1 \\
x_2&=2 & x_2&=2
\end{align*}
$$\hat{x}=\begin{bmatrix}-1\\2\end{bmatrix}$$}
\item {\textbf{Way 2: Invertible Matrix}
$$\vv{x}=\begin{bmatrix}2&2\\2&4\end{bmatrix}^{-1}\begin{bmatrix}2\\6\end{bmatrix}=\frac{1}{4}\begin{bmatrix}4&-2\\-2&2\end{bmatrix}\begin{bmatrix}2\\6\end{bmatrix}=\frac{1}{4}\begin{bmatrix}-4\\8\end{bmatrix}=\begin{bmatrix}-1\\2\end{bmatrix}$$}
\end{enumerate}
$\hat{x}=\begin{bmatrix}-1\\2\end{bmatrix}$ is the only least squares solution.}
\item {Calculate $\text{proj}_{\text{col}A}\vv{b}$:
$$\text{proj}_{\text{col}A}\vv{b}=A\vv{x}=\begin{bmatrix}1&1\\0&1\\1&1\\0&1\end{bmatrix}\begin{bmatrix}-1\\2\end{bmatrix}=\begin{bmatrix}1\\2\\1\\2\end{bmatrix}$$}
\item {What is the error in approximation?
\begin{align*}
\text{error}&=\norm{\vv{b}-A\hat{x}} \\
&=\norm{\begin{bmatrix}1\\1\\1\\3\end{bmatrix}-\begin{bmatrix}1\\2\\1\\2\end{bmatrix}}\\
&=\norm{\begin{bmatrix}0\\-1\\0\\1\end{bmatrix}} \\
&=\sqrt{0^2+\(-1\)^2+0^2+1^2} \\
&=\sqrt{2}
\end{align*}}
\end{enumerate}
\end{example}
\begin{example}{Example: Least Squared Solution With Infinite Solutions}
Find all least squares solutions to $A\vv{x}=\vv{b}$ where $A=\begin{bmatrix}1&1&0\\1&1&0\\1&0&1\\1&0&1\end{bmatrix}$ and $\vv{b}=\begin{bmatrix}1\\3\\8\\2\end{bmatrix}$. What vector is approximating $\vv{b}$? What is the error in the approximation?
\begin{enumerate}
\item{
\begin{align*}
A^TA&=\begin{bmatrix}4&2&2\\2&2&0\\2&0&2\end{bmatrix} \\
A^T\vv{b}&=\begin{bmatrix}14\\4\\10\end{bmatrix}
\end{align*}
Solve $A^TA\vv{x}=A^T\vv{b}$:
$$\begin{bmatrix}4&2&2\\2&2&0\\2&0&2\end{bmatrix}\hat{x}=\begin{bmatrix}14\\4\\10\end{bmatrix}$$
$$\begin{bmatrix}A^TA&\aug&A^T\vv{b}\end{bmatrix}=\begin{bmatrix}4&2&2&\aug&14\\2&2&0&\aug&4\\2&0&2&\aug&10\end{bmatrix}\xrightarrow{\text{Row Reduce}}\begin{bmatrix}1&0&1&\aug&5\\0&1&-1&\aug&-3\\0&0&0&\aug&0\end{bmatrix}$$
\begin{align*}
x_1+x_3&=5 & x_1&=5-t \\
x_2-x_3&=-3 & x_2&=-3+t \\
&&x_3&=t
\end{align*}
The least squares solutions are \fbox{$\hat{x}=\begin{bmatrix}5-t\\-3+t\\t\end{bmatrix},t\in\mathbb{R}$}}
\item {$A\hat{x}$ is approximating $\vv{b}$:
$$A\hat{x}=\begin{bmatrix}1&1&0\\1&1&0\\1&0&1\\1&0&1\end{bmatrix}\begin{bmatrix}5-t\\-3+t\\t\end{bmatrix}=\underbrace{\begin{bmatrix}2\\2\\5\\5\end{bmatrix}}_{\text{proj}_{\text{col}A}\vv{b}}$$}
\item {$\text{error}=\norm{\vv{b}-A\hat{x}}=\norm{\begin{bmatrix}1\\3\\8\\2\end{bmatrix}-\begin{bmatrix}2\\2\\5\\5\end{bmatrix}}=\norm{\begin{bmatrix}-1\\1\\3\\-3\end{bmatrix}}=\sqrt{20}$}
\end{enumerate}
\end{example}
\begin{example}{Example: Finding A Best Fit Line}
Consider the points (1,1), (2,3) and (4,2). Find an equation for the line that best fits these points. \\
\begin{tblr}{width=\linewidth,colspec={XX},cells={halign=c,valign=m}}
{$\begin{aligned}
y&=mx+b \\
1&=m(1)+b \\
3&=m(2)+b \\
2&=m(4)+b
\end{aligned}$} & {\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -1.5, ymax = 4.5,
xmin = -1.5, xmax = 4.5,
axis line style = {latex-latex, ultra thick},
xtick = {-1,...,4},
ytick = {-1,...,4},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\addplot[mark=*, ultra thick] coordinates {(1,1)};
\addplot[mark=*, ultra thick] coordinates {(2,3)};
\addplot[mark=*, ultra thick] coordinates {(4,2)};
\end{axis}
\end{tikzpicture}}}
\end{tblr}
$$\begin{aligned}m+b&=1\\2m+b&=3\\4m+b&=2\end{aligned}\qquad\underbrace{\begin{bmatrix}1&1\\2&1\\4&1\end{bmatrix}\begin{bmatrix}m\\b\end{bmatrix}=\begin{bmatrix}1\\3\\2\end{bmatrix}}_{\substack{A\vv{x}=\vv{c}\\\text{(No Solution)}}}$$
Find the least squares solution set to $A\vv{x}=\vv{c}$: Solve $A^TA\vv{x}=A^T\vv{c}$.
\begin{align*}
A^TA&=\begin{bmatrix}1&2&4\\1&1&1\end{bmatrix}\begin{bmatrix}1&1\\2&1\\4&1\end{bmatrix}=\begin{bmatrix}21&7\\7&3\end{bmatrix} \\
A^T\vv{c}&=\begin{bmatrix}1&2&4\\1&1&1\end{bmatrix}\begin{bmatrix}1\\3\\2\end{bmatrix}=\begin{bmatrix}15\\6\end{bmatrix}
\end{align*}
Solve $\begin{bmatrix}21&7\\7&3\end{bmatrix}\begin{bmatrix}m\\b\end{bmatrix}=\begin{bmatrix}15\\6\end{bmatrix}$
$$\hat{x}=\frac{1}{14}\begin{bmatrix}3\\21\end{bmatrix}=\begin{bNiceMatrix}[last-col]3/14&\leftarrow m\\3/2&\leftarrow b\end{bNiceMatrix}$$
Line of Best Fit: $y=\dfrac{3}{14}x+\dfrac{3}{2}$ \\
\begin{tblr}{width=\linewidth,colspec={XX},cells={halign=c,valign=m}}
{$A\hat{x}=\begin{bmatrix}24/14\\27/14\\33/14\end{bmatrix}$} & {\raisebox{-.5\height}{\begin{tikzpicture}
\begin{axis} [
width = \linewidth,
axis x line=middle,
axis y line=middle,
ymin = -1.5, ymax = 4.5,
xmin = -1.5, xmax = 4.5,
axis line style = {latex-latex, ultra thick},
xtick = {-1,...,4},
ytick = {-1,...,4},
xlabel = {$x$},
ylabel = {$y$},
clip = false,
]
\addplot[mark=*, ultra thick] coordinates {(1,1)};
\addplot[mark=*, ultra thick] coordinates {(2,3)};
\addplot[mark=*, ultra thick] coordinates {(4,2)};
\addplot [
black,
samples = 200,
style = {latex-latex, red, thick},
domain = -1.5:4.5,
] {0.21428571428571428571428571428571*x+1.5};
\addplot[mark=*, ultra thick] coordinates {(1,1.7142857142857142857142857142857)};
\addplot[mark=*, ultra thick] coordinates {(2,1.9285714285714285714285714285714)};
\addplot[mark=*, ultra thick] coordinates {(4,2.3571428571428571428571428571429)};
\draw[black, dotted, ultra thick] (1,1) -- (1,1.7142857142857142857142857142857);
\draw[black, dotted, ultra thick] (2,3) -- (2,1.9285714285714285714285714285714);
\draw[black, dotted, ultra thick] (4,2) -- (4,2.3571428571428571428571428571429);
\end{axis}
\end{tikzpicture}}}
\end{tblr}
$\text{error}=\norm{\vv{c}-A\hat{x}}$
\end{example}
\end{flushleft}
\end{document}